xref: /netbsd-src/sys/arch/sun3/sun3x/pmap.c (revision 481fca6e59249d8ffcf24fef7cfbe7b131bfb080)
1 /*	$NetBSD: pmap.c,v 1.54 2000/06/29 07:19:14 mrg Exp $	*/
2 
3 /*-
4  * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jeremy Cooper.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * XXX These comments aren't quite accurate.  Need to change.
41  * The sun3x uses the MC68851 Memory Management Unit, which is built
42  * into the CPU.  The 68851 maps virtual to physical addresses using
43  * a multi-level table lookup, which is stored in the very memory that
44  * it maps.  The number of levels of lookup is configurable from one
45  * to four.  In this implementation, we use three, named 'A' through 'C'.
46  *
47  * The MMU translates virtual addresses into physical addresses by
48  * traversing these tables in a process called a 'table walk'.  The most
49  * significant 7 bits of the Virtual Address ('VA') being translated are
50  * used as an index into the level A table, whose base in physical memory
51  * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
52  * address found at that index in the A table is used as the base
53  * address for the next table, the B table.  The next six bits of the VA are
54  * used as an index into the B table, which in turn gives the base address
55  * of the third and final C table.
56  *
57  * The next six bits of the VA are used as an index into the C table to
58  * locate a Page Table Entry (PTE).  The PTE is a physical address in memory
59  * to which the remaining 13 bits of the VA are added, producing the
60  * mapped physical address.
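 *
 * As a concrete illustration of the 7/6/6/13 split described above (the
 * MMU_TIA/MMU_TIB/MMU_TIC() macros used later in this file encode the
 * same split), consider VA 0x12345678:
 *
 *      A index =  VA >> 25          = 0x09
 *      B index = (VA >> 19) & 0x3f  = 0x06
 *      C index = (VA >> 13) & 0x3f  = 0x22
 *      offset  =  VA & 0x1fff       = 0x1678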
61  *
62  * To map the entire memory space in this manner would require 2114296 bytes
63  * of page tables per process - quite expensive.  Instead we will
64  * allocate a fixed but considerably smaller space for the page tables at
65  * the time the VM system is initialized.  When the pmap code is asked by
66  * the kernel to map a VA to a PA, it allocates tables as needed from this
67  * pool.  When there are no more tables in the pool, tables are stolen
68  * from the oldest mapped entries in the tree.  This is only possible
69  * because all memory mappings are stored in the kernel memory map
70  * structures, independent of the pmap structures.  A VA which references
71  * one of these invalidated maps will cause a page fault.  The kernel
72  * will determine that the page fault was caused by a task using a valid
73  * VA, but for some reason (which does not concern it), that address was
74  * not mapped.  It will ask the pmap code to re-map the entry and then
75  * it will resume executing the faulting task.
76  *
77  * In this manner the most efficient use of the page table space is
78  * achieved.  Tasks which do not execute often will have their tables
79  * stolen and reused by tasks which execute more frequently.  The best
80  * size for the page table pool will probably be determined by
81  * experimentation.
82  *
83  * You read all of the comments so far.  Good for you.
84  * Now go play!
85  */
86 
87 /*** A Note About the 68851 Address Translation Cache
88  * The MC68851 has a 64 entry cache, called the Address Translation Cache
89  * or 'ATC'.  This cache stores the most recently used page descriptors
90  * accessed by the MMU when it does translations.  Using a marker called a
91  * 'task alias' the MMU can store the descriptors from 8 different table
92  * spaces concurrently.  The task alias is associated with the base
93  * address of the level A table of that address space.  When an address
94  * space is currently active (the CRP currently points to its A table)
95  * the only cached descriptors that will be obeyed are ones which have a
96  * matching task alias of the current space associated with them.
97  *
98  * Since the cache is always consulted before any table lookups are done,
99  * it is important that it accurately reflect the state of the MMU tables.
100  * Whenever a change has been made to a table that has been loaded into
101  * the MMU, the code must be sure to flush any cached entries that are
102  * affected by the change.  These instances are documented in the code at
103  * various points.
104  */
105 /*** A Note About the Note About the 68851 Address Translation Cache
106  * 4 months into this code I discovered that the sun3x does not have
107  * an MC68851 chip. Instead, it has a version of this MMU that is part of
108  * the 68030 CPU.
109  * Although it behaves very similarly to the 68851, it only has 1 task
110  * alias and a 22 entry cache.  So sadly (or happily), the first paragraph
111  * of the previous note does not apply to the sun3x pmap.
112  */
113 
114 #include "opt_ddb.h"
115 
116 #include <sys/param.h>
117 #include <sys/systm.h>
118 #include <sys/proc.h>
119 #include <sys/malloc.h>
120 #include <sys/user.h>
121 #include <sys/queue.h>
122 #include <sys/kcore.h>
123 
124 #include <uvm/uvm.h>
125 
126 #define PAGER_SVA (uvm.pager_sva)
127 #define PAGER_EVA (uvm.pager_eva)
128 
129 #include <machine/cpu.h>
130 #include <machine/kcore.h>
131 #include <machine/mon.h>
132 #include <machine/pmap.h>
133 #include <machine/pte.h>
134 #include <machine/vmparam.h>
135 
136 #include <sun3/sun3/cache.h>
137 #include <sun3/sun3/machdep.h>
138 
139 #include "pmap_pvt.h"
140 
141 /* XXX - What headers declare these? */
142 extern struct pcb *curpcb;
143 extern int physmem;
144 
145 extern void copypage __P((const void*, void*));
146 extern void zeropage __P((void*));
147 
148 /* Defined in locore.s */
149 extern char kernel_text[];
150 
151 /* Defined by the linker */
152 extern char etext[], edata[], end[];
153 extern char *esym;	/* DDB */
154 
155 /*************************** DEBUGGING DEFINITIONS ***********************
156  * Macros, preprocessor defines and variables used in debugging can make *
157  * code hard to read.  Anything used exclusively for debugging purposes  *
158  * is defined here to avoid having such mess scattered around the file.  *
159  *************************************************************************/
160 #ifdef	PMAP_DEBUG
161 /*
162  * To aid the debugging process, macros should be expanded into smaller steps
163  * that accomplish the same goal, yet provide convenient places for placing
164  * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
165  * 'INLINE' keyword is defined to an empty string.  This way, any function
166  * defined to be a 'static INLINE' will become 'outlined' and compiled as
167  * a separate function, which is much easier to debug.
168  */
169 #define	INLINE	/* nothing */
170 
171 /*
172  * It is sometimes convenient to watch the activity of a particular table
173  * in the system.  The following variables are used for that purpose.
174  */
175 a_tmgr_t *pmap_watch_atbl = 0;
176 b_tmgr_t *pmap_watch_btbl = 0;
177 c_tmgr_t *pmap_watch_ctbl = 0;
178 
179 int pmap_debug = 0;
180 #define DPRINT(args) if (pmap_debug) printf args
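
/*
 * Illustrative use of the double-parenthesis convention DPRINT() expects
 * (a hypothetical call site, not taken from this file):
 *
 *	DPRINT(("pmap_enter: va=0x%lx pa=0x%lx\n", va, pa));
 */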
181 
182 #else	/********** Stuff below is defined if NOT debugging **************/
183 
184 #define	INLINE	inline
185 #define DPRINT(args)  /* nada */
186 
187 #endif	/* PMAP_DEBUG */
188 /*********************** END OF DEBUGGING DEFINITIONS ********************/
189 
190 /*** Management Structure - Memory Layout
191  * For every MMU table in the sun3x pmap system there must be a way to
192  * manage it; we must know which process is using it, what other tables
193  * depend on it, and whether or not it contains any locked pages.  This
194  * is solved by the creation of 'table management'  or 'tmgr'
195  * structures.  One for each MMU table in the system.
196  *
197  *                        MAP OF MEMORY USED BY THE PMAP SYSTEM
198  *
199  *      towards lower memory
200  * kernAbase -> +-------------------------------------------------------+
201  *              | Kernel     MMU A level table                          |
202  * kernBbase -> +-------------------------------------------------------+
203  *              | Kernel     MMU B level tables                         |
204  * kernCbase -> +-------------------------------------------------------+
205  *              |                                                       |
206  *              | Kernel     MMU C level tables                         |
207  *              |                                                       |
208  * mmuCbase  -> +-------------------------------------------------------+
209  *              | User       MMU C level tables                         |
210  * mmuAbase  -> +-------------------------------------------------------+
211  *              |                                                       |
212  *              | User       MMU A level tables                         |
213  *              |                                                       |
214  * mmuBbase  -> +-------------------------------------------------------+
215  *              | User       MMU B level tables                         |
216  * tmgrAbase -> +-------------------------------------------------------+
217  *              |  TMGR A level table structures                        |
218  * tmgrBbase -> +-------------------------------------------------------+
219  *              |  TMGR B level table structures                        |
220  * tmgrCbase -> +-------------------------------------------------------+
221  *              |  TMGR C level table structures                        |
222  * pvbase    -> +-------------------------------------------------------+
223  *              |  Physical to Virtual mapping table (list heads)       |
224  * pvebase   -> +-------------------------------------------------------+
225  *              |  Physical to Virtual mapping table (list elements)    |
226  *              |                                                       |
227  *              +-------------------------------------------------------+
228  *      towards higher memory
229  *
230  * For every A table in the MMU A area, there will be a corresponding
231  * a_tmgr structure in the TMGR A area.  The same will be true for
232  * the B and C tables.  This arrangement will make it easy to find the
233  * controlling tmgr structure for any table in the system by use of
234  * (relatively) simple macros.
235  */
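
/*
 * A sketch of that correspondence (the mmuX2tmgr() helpers further below
 * implement the inverse lookup): the manager for the i'th user A table,
 * &mmuAbase[i * MMU_A_TBL_SIZE], is simply &Atmgrbase[i], and likewise
 * for the B and C pools.
 */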
236 
237 /*
238  * Global variables for storing the base addresses for the areas
239  * labeled above.
240  */
241 static vm_offset_t  	kernAphys;
242 static mmu_long_dte_t	*kernAbase;
243 static mmu_short_dte_t	*kernBbase;
244 static mmu_short_pte_t	*kernCbase;
245 static mmu_short_pte_t	*mmuCbase;
246 static mmu_short_dte_t	*mmuBbase;
247 static mmu_long_dte_t	*mmuAbase;
248 static a_tmgr_t		*Atmgrbase;
249 static b_tmgr_t		*Btmgrbase;
250 static c_tmgr_t		*Ctmgrbase;
251 static pv_t 		*pvbase;
252 static pv_elem_t	*pvebase;
253 struct pmap 		kernel_pmap;
254 
255 /*
256  * This holds the CRP currently loaded into the MMU.
257  */
258 struct mmu_rootptr kernel_crp;
259 
260 /*
261  * Just all around global variables.
262  */
263 static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
264 static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
265 static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;
266 
267 
268 /*
269  * Flags used to mark the safety/availability of certain operations or
270  * resources.
271  */
272 static boolean_t pv_initialized = FALSE, /* PV system has been initialized. */
273        bootstrap_alloc_enabled = FALSE; /*Safe to use pmap_bootstrap_alloc().*/
274 int tmp_vpages_inuse;	/* Temporary virtual pages are in use */
275 
276 /*
277  * XXX:  For now, retain the traditional variables that were
278  * used in the old pmap/vm interface (without NONCONTIG).
279  */
280 /* Kernel virtual address space available: */
281 vm_offset_t	virtual_avail, virtual_end;
282 /* Physical address space available: */
283 vm_offset_t	avail_start, avail_end;
284 
285 /* This keeps track of the end of the contiguously mapped range. */
286 vm_offset_t virtual_contig_end;
287 
288 /* Physical address used by pmap_next_page() */
289 vm_offset_t avail_next;
290 
291 /* These are used by pmap_copy_page(), etc. */
292 vm_offset_t tmp_vpages[2];
293 
294 /*
295  * The 3/80 is the only member of the sun3x family that has non-contiguous
296  * physical memory.  Memory is divided into 4 banks which are physically
297  * located on the system board.  Although the size of these banks varies
298  * with the size of memory they contain, their base addresses are
299  * permanently fixed.  The following structure, which describes these
300  * banks, is initialized by pmap_bootstrap() after it reads from a similar
301  * structure provided by the ROM Monitor.
302  *
303  * For the other machines in the sun3x architecture which do have contiguous
304  * RAM, this list will have only one entry, which will describe the entire
305  * range of available memory.
306  */
307 struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
308 u_int total_phys_mem;
309 
310 /*************************************************************************/
311 
312 /*
313  * XXX - Should "tune" these based on statistics.
314  *
315  * My first guess about the relative numbers of these needed is
316  * based on the fact that a "typical" process will have several
317  * pages mapped at low virtual addresses (text, data, bss), then
318  * some mapped shared libraries, and then some stack pages mapped
319  * near the high end of the VA space.  Each process can use only
320  * one A table, and most will use only two B tables (maybe three)
321  * and probably about four C tables.  Therefore, the first guess
322  * at the relative numbers of these needed is 1:2:4 -gwr
323  *
324  * The number of C tables needed is closely related to the amount
325  * of physical memory available plus a certain amount attributable
326  * to the use of double mappings.  With a few simulation statistics
327  * we can find a reasonably good estimation of this unknown value.
328  * Armed with that and the above ratios, we have a good idea of what
329  * is needed at each level. -j
330  *
331  * Note: It is not the physical memory size, but the total mapped
332  * virtual space required by the combined working sets of all the
333  * currently _runnable_ processes.  (Sleeping ones don't count.)
334  * The amount of physical memory should be irrelevant. -gwr
335  */
336 #ifdef	FIXED_NTABLES
337 #define NUM_A_TABLES	16
338 #define NUM_B_TABLES	32
339 #define NUM_C_TABLES	64
340 #else
341 unsigned int	NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
342 #endif	/* FIXED_NTABLES */
343 
344 /*
345  * This determines our total virtual mapping capacity.
346  * Yes, it is a FIXED value so we can pre-allocate.
347  */
348 #define NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)
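/*
 * For a rough sense of scale (illustrative only): with the FIXED_NTABLES
 * default of 64 C tables and 64 PTEs per C table, this is 4096 user PTEs,
 * i.e. 32MB of simultaneously mapped user virtual space with 8KB pages.
 * The non-FIXED_NTABLES case is sized in pmap_bootstrap() instead.
 */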
349 
350 /*
351  * The size of the Kernel Virtual Address Space (KVAS)
352  * for purposes of MMU table allocation is -KERNBASE
353  * (length from KERNBASE to 0xFFFFffff)
354  */
355 #define	KVAS_SIZE		(-KERNBASE)
356 
357 /* Numbers of kernel MMU tables to support KVAS_SIZE. */
358 #define KERN_B_TABLES	(KVAS_SIZE >> MMU_TIA_SHIFT)
359 #define KERN_C_TABLES	(KVAS_SIZE >> MMU_TIB_SHIFT)
360 #define	NUM_KERN_PTES	(KVAS_SIZE >> MMU_TIC_SHIFT)
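
/*
 * A worked example, assuming the usual sun3x KERNBASE of 0xF8000000 and
 * the 7/6/6/13 index split above (shifts of 25/19/13):
 *
 *	KVAS_SIZE     = 0 - 0xF8000000   = 0x08000000 (128MB)
 *	KERN_B_TABLES = 0x08000000 >> 25 =     4
 *	KERN_C_TABLES = 0x08000000 >> 19 =   256
 *	NUM_KERN_PTES = 0x08000000 >> 13 = 16384
 */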
361 
362 /*************************** MISCELLANEOUS MACROS ************************/
363 #define PMAP_LOCK()	;	/* Nothing, for now */
364 #define PMAP_UNLOCK()	;	/* same. */
365 #define	NULL 0
366 
367 static INLINE void *      mmu_ptov __P((vm_offset_t pa));
368 static INLINE vm_offset_t mmu_vtop __P((void * va));
369 
370 #if	0
371 static INLINE a_tmgr_t * mmuA2tmgr __P((mmu_long_dte_t *));
372 #endif /* 0 */
373 static INLINE b_tmgr_t * mmuB2tmgr __P((mmu_short_dte_t *));
374 static INLINE c_tmgr_t * mmuC2tmgr __P((mmu_short_pte_t *));
375 
376 static INLINE pv_t *pa2pv __P((vm_offset_t pa));
377 static INLINE int   pteidx __P((mmu_short_pte_t *));
378 static INLINE pmap_t current_pmap __P((void));
379 
380 /*
381  * We can always convert between virtual and physical addresses
382  * for anything in the range [KERNBASE ... avail_start] because
383  * that range is GUARANTEED to be mapped linearly.
384  * We rely heavily upon this feature!
385  */
386 static INLINE void *
387 mmu_ptov(pa)
388 	vm_offset_t pa;
389 {
390 	register vm_offset_t va;
391 
392 	va = (pa + KERNBASE);
393 #ifdef	PMAP_DEBUG
394 	if ((va < KERNBASE) || (va >= virtual_contig_end))
395 		panic("mmu_ptov");
396 #endif
397 	return ((void*)va);
398 }
399 static INLINE vm_offset_t
400 mmu_vtop(vva)
401 	void *vva;
402 {
403 	register vm_offset_t va;
404 
405 	va = (vm_offset_t)vva;
406 #ifdef	PMAP_DEBUG
407 	if ((va < KERNBASE) || (va >= virtual_contig_end))
408 		panic("mmu_vtop");
409 #endif
410 	return (va - KERNBASE);
411 }
412 
413 /*
414  * These macros map MMU tables to their corresponding manager structures.
415  * They are needed quite often because many of the pointers in the pmap
416  * system reference MMU tables and not the structures that control them.
417  * There needs to be a way to find one when given the other and these
418  * macros do so by taking advantage of the memory layout described above.
419  * Here's a quick step through the first macro, mmuA2tmgr():
420  *
421  * 1) find the offset of the given MMU A table from the base of its table
422  *    pool (table - mmuAbase).
423  * 2) convert this offset into a table index by dividing it by the
424  *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
425  * 3) use this index to select the corresponding 'A' table manager
426  *    structure from the 'A' table manager pool (Atmgrbase[index]).
427  */
428 /*  This function is not currently used. */
429 #if	0
430 static INLINE a_tmgr_t *
431 mmuA2tmgr(mmuAtbl)
432 	mmu_long_dte_t *mmuAtbl;
433 {
434 	register int idx;
435 
436 	/* Which table is this in? */
437 	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
438 #ifdef	PMAP_DEBUG
439 	if ((idx < 0) || (idx >= NUM_A_TABLES))
440 		panic("mmuA2tmgr");
441 #endif
442 	return (&Atmgrbase[idx]);
443 }
444 #endif	/* 0 */
445 
446 static INLINE b_tmgr_t *
447 mmuB2tmgr(mmuBtbl)
448 	mmu_short_dte_t *mmuBtbl;
449 {
450 	register int idx;
451 
452 	/* Which table is this in? */
453 	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
454 #ifdef	PMAP_DEBUG
455 	if ((idx < 0) || (idx >= NUM_B_TABLES))
456 		panic("mmuB2tmgr");
457 #endif
458 	return (&Btmgrbase[idx]);
459 }
460 
461 /* mmuC2tmgr			INTERNAL
462  **
463  * Given a pte known to belong to a C table, return the address of
464  * that table's management structure.
465  */
466 static INLINE c_tmgr_t *
467 mmuC2tmgr(mmuCtbl)
468 	mmu_short_pte_t *mmuCtbl;
469 {
470 	register int idx;
471 
472 	/* Which table is this in? */
473 	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
474 #ifdef	PMAP_DEBUG
475 	if ((idx < 0) || (idx >= NUM_C_TABLES))
476 		panic("mmuC2tmgr");
477 #endif
478 	return (&Ctmgrbase[idx]);
479 }
480 
481 /* This is now a function call below.
482  * #define pa2pv(pa) \
483  *	(&pvbase[(unsigned long)\
484  *		m68k_btop(pa)\
485  *	])
486  */
487 
488 /* pa2pv			INTERNAL
489  **
490  * Return the pv_list_head element which manages the given physical
491  * address.
492  */
493 static INLINE pv_t *
494 pa2pv(pa)
495 	vm_offset_t pa;
496 {
497 	register struct pmap_physmem_struct *bank;
498 	register int idx;
499 
500 	bank = &avail_mem[0];
501 	while (pa >= bank->pmem_end)
502 		bank = bank->pmem_next;
503 
504 	pa -= bank->pmem_start;
505 	idx = bank->pmem_pvbase + m68k_btop(pa);
506 #ifdef	PMAP_DEBUG
507 	if ((idx < 0) || (idx >= physmem))
508 		panic("pa2pv");
509 #endif
510 	return &pvbase[idx];
511 }
512 
513 /* pteidx			INTERNAL
514  **
515  * Return the index of the given PTE within the entire fixed table of
516  * PTEs.
517  */
518 static INLINE int
519 pteidx(pte)
520 	mmu_short_pte_t *pte;
521 {
522 	return (pte - kernCbase);
523 }
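
/*
 * Note (a sketch of how this index is used): the PV element describing a
 * given pte is pvebase[pteidx(pte)], which is why the kernel and user C
 * tables must be allocated contiguously -- see the note in
 * pmap_bootstrap() below.
 */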
524 
525 /*
526  * This just offers a place to put some debugging checks,
527  * and reduces the number of places "curproc" appears...
528  */
529 static INLINE pmap_t
530 current_pmap()
531 {
532 	struct proc *p;
533 	struct vmspace *vm;
534 	vm_map_t	map;
535 	pmap_t	pmap;
536 
537 	p = curproc;	/* XXX */
538 	if (p == NULL)
539 		pmap = &kernel_pmap;
540 	else {
541 		vm = p->p_vmspace;
542 		map = &vm->vm_map;
543 		pmap = vm_map_pmap(map);
544 	}
545 
546 	return (pmap);
547 }
548 
549 
550 /*************************** FUNCTION DEFINITIONS ************************
551  * These appear here merely for the compiler to enforce type checking on *
552  * all function calls.                                                   *
553  *************************************************************************/
554 
555 /** External functions
556  ** - functions used within this module but written elsewhere.
557  **   both of these functions are in locore.s
558  ** XXX - These functions were later replaced with their more cryptic
559  **       hp300 counterparts.  They may be removed now.
560  **/
561 #if	0	/* deprecated mmu */
562 void   mmu_seturp __P((vm_offset_t));
563 void   mmu_flush __P((int, vm_offset_t));
564 void   mmu_flusha __P((void));
565 #endif	/* 0 */
566 
567 /** Internal functions
568  ** Most functions used only within this module are defined in
569  **   pmap_pvt.h (why not here if used only here?)
570  **/
571 static void pmap_page_upload __P((void));
572 
573 /** Interface functions
574  ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
575  **   defined.
576  **/
577 int    pmap_page_index __P((vm_offset_t));
578 void pmap_pinit __P((pmap_t));
579 void pmap_release __P((pmap_t));
580 
581 /********************************** CODE ********************************
582  * Functions that are called from other parts of the kernel are labeled *
583  * as 'INTERFACE' functions.  Functions that are only called from       *
584  * within the pmap module are labeled as 'INTERNAL' functions.          *
585  * Functions that are internal, but are not (currently) used at all are *
586  * labeled 'INTERNAL_X'.                                                *
587  ************************************************************************/
588 
589 /* pmap_bootstrap			INTERNAL
590  **
591  * Initializes the pmap system.  Called at boot time from
592  * locore2.c:_vm_init()
593  *
594  * Reminder: having a pmap_bootstrap_alloc() and also having the VM
595  *           system implement pmap_steal_memory() is redundant.
596  *           Don't release this code without removing one or the other!
597  */
598 void
599 pmap_bootstrap(nextva)
600 	vm_offset_t nextva;
601 {
602 	struct physmemory *membank;
603 	struct pmap_physmem_struct *pmap_membank;
604 	vm_offset_t va, pa, eva;
605 	int b, c, i, j;	/* running table counts */
606 	int size, resvmem;
607 
608 	/*
609 	 * This function is called by __bootstrap after it has
610 	 * determined the type of machine and made the appropriate
611 	 * patches to the ROM vectors (XXX- I don't quite know what I meant
612 	 * by that.)  It allocates and sets up enough of the pmap system
613 	 * to manage the kernel's address space.
614 	 */
615 
616 	/*
617 	 * Determine the range of kernel virtual and physical
618 	 * space available. Note that we ABSOLUTELY DEPEND on
619 	 * the fact that the first bank of memory (4MB) is
620 	 * mapped linearly to KERNBASE (which we guaranteed in
621 	 * the first instructions of locore.s).
622 	 * That is plenty for our bootstrap work.
623 	 */
624 	virtual_avail = m68k_round_page(nextva);
625 	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
626 	virtual_end = VM_MAX_KERNEL_ADDRESS;
627 	/* Don't need avail_start til later. */
628 
629 	/* We may now call pmap_bootstrap_alloc(). */
630 	bootstrap_alloc_enabled = TRUE;
631 
632 	/*
633 	 * This is a somewhat unwrapped loop to deal with
634 	 * copying the PROM's 'physmem' banks into the pmap's
635 	 * banks.  The following is always assumed:
636 	 * 1. There is always at least one bank of memory.
637 	 * 2. There is always a last bank of memory, and its
638 	 *    pmem_next member must be set to NULL.
639 	 */
640 	membank = romVectorPtr->v_physmemory;
641 	pmap_membank = avail_mem;
642 	total_phys_mem = 0;
643 
644 	for (;;) { /* break on !membank */
645 		pmap_membank->pmem_start = membank->address;
646 		pmap_membank->pmem_end = membank->address + membank->size;
647 		total_phys_mem += membank->size;
648 		membank = membank->next;
649 		if (!membank)
650 			break;
651 		/* This silly syntax arises because pmap_membank
652 		 * is really a pre-allocated array, but it is put into
653 		 * use as a linked list.
654 		 */
655 		pmap_membank->pmem_next = pmap_membank + 1;
656 		pmap_membank = pmap_membank->pmem_next;
657 	}
658 	/* This is the last element. */
659 	pmap_membank->pmem_next = NULL;
660 
661 	/*
662 	 * Note: total_phys_mem, physmem represent
663 	 * actual physical memory, including that
664 	 * reserved for the PROM monitor.
665 	 */
666 	physmem = btoc(total_phys_mem);
667 
668 	/*
669 	 * The last bank of memory should be reduced to prevent the
670 	 * physical pages needed by the PROM monitor from being used
671 	 * in the VM system.
672 	 */
673 	resvmem = total_phys_mem - *(romVectorPtr->memoryAvail);
674 	resvmem = m68k_round_page(resvmem);
675 	pmap_membank->pmem_end -= resvmem;
676 
677 	/*
678 	 * Avail_end is set to the first byte of physical memory
679 	 * after the end of the last bank.  We use this only to
680 	 * determine if a physical address is "managed" memory.
681 	 */
682 	avail_end = pmap_membank->pmem_end;
683 
684 	/*
685 	 * First allocate enough kernel MMU tables to map all
686 	 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
687 	 * Note: All must be aligned on 256 byte boundaries.
688 	 * Start with the level-A table (one of those).
689 	 */
690 	size = sizeof(mmu_long_dte_t)  * MMU_A_TBL_SIZE;
691 	kernAbase = pmap_bootstrap_alloc(size);
692 	bzero(kernAbase, size);
693 
694 	/* Now the level-B kernel tables... */
695 	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
696 	kernBbase = pmap_bootstrap_alloc(size);
697 	bzero(kernBbase, size);
698 
699 	/* Now the level-C kernel tables... */
700 	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
701 	kernCbase = pmap_bootstrap_alloc(size);
702 	bzero(kernCbase, size);
703 	/*
704 	 * Note: In order for the PV system to work correctly, the kernel
705 	 * and user-level C tables must be allocated contiguously.
706 	 * Nothing should be allocated between here and the allocation of
707 	 * mmuCbase below.  XXX: Should do this as one allocation, and
708 	 * then compute a pointer for mmuCbase instead of this...
709 	 *
710 	 * Allocate user MMU tables.
711 	 * These must be contiguous with the preceding.
712 	 */
713 
714 #ifndef	FIXED_NTABLES
715 	/*
716 	 * The number of user-level C tables that should be allocated is
717 	 * related to the size of physical memory.  In general, there should
718 	 * be enough tables to map four times the amount of available RAM.
719 	 * The extra amount is needed because some table space is wasted by
720 	 * fragmentation.
721 	 */
722 	NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
723 	NUM_B_TABLES = NUM_C_TABLES / 2;
724 	NUM_A_TABLES = NUM_B_TABLES / 2;
725 #endif	/* !FIXED_NTABLES */
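
	/*
	 * A worked example of the sizing above (illustrative numbers only):
	 * with 16MB of RAM, 64 PTEs per C table and 8KB pages,
	 *
	 *	NUM_C_TABLES = (16MB * 4) / (64 * 8KB) = 128
	 *	NUM_B_TABLES = 128 / 2                 =  64
	 *	NUM_A_TABLES =  64 / 2                 =  32
	 */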
726 
727 	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE	* NUM_C_TABLES;
728 	mmuCbase = pmap_bootstrap_alloc(size);
729 
730 	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE	* NUM_B_TABLES;
731 	mmuBbase = pmap_bootstrap_alloc(size);
732 
733 	size = sizeof(mmu_long_dte_t)  * MMU_A_TBL_SIZE * NUM_A_TABLES;
734 	mmuAbase = pmap_bootstrap_alloc(size);
735 
736 	/*
737 	 * Fill in the never-changing part of the kernel tables.
738 	 * For simplicity, the kernel's mappings will be editable as a
739 	 * flat array of page table entries at kernCbase.  The
740 	 * higher level 'A' and 'B' tables must be initialized to point
741 	 * to this lower one.
742 	 */
743 	b = c = 0;
744 
745 	/*
746 	 * Invalidate all mappings below KERNBASE in the A table.
747 	 * This area has already been zeroed out, but it is good
748 	 * practice to explicitly show that we are interpreting
749 	 * it as a list of A table descriptors.
750 	 */
751 	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
752 		kernAbase[i].addr.raw = 0;
753 	}
754 
755 	/*
756 	 * Set up the kernel A and B tables so that they will reference the
757 	 * correct spots in the contiguous table of PTEs allocated for the
758 	 * kernel's virtual memory space.
759 	 */
760 	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
761 		kernAbase[i].attr.raw =
762 			MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
763 		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);
764 
765 		for (j=0; j < MMU_B_TBL_SIZE; j++) {
766 			kernBbase[b + j].attr.raw = mmu_vtop(&kernCbase[c])
767 				| MMU_DT_SHORT;
768 			c += MMU_C_TBL_SIZE;
769 		}
770 		b += MMU_B_TBL_SIZE;
771 	}
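
	/*
	 * The net effect of the loop above, stated as a sketch: for any
	 * kernel VA, the PTE that maps it is the flat-array entry
	 * kernCbase[m68k_btop(va - KERNBASE)].  This is exactly how
	 * pmap_bootstrap_copyprom() below indexes the kernel PTEs.
	 */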
772 
773 	/* XXX - Doing kernel_pmap a little further down. */
774 
775 	pmap_alloc_usermmu();	/* Allocate user MMU tables.        */
776 	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.*/
777 	pmap_alloc_pv();	/* Allocate physical->virtual map.  */
778 
779 	/*
780 	 * We are now done with pmap_bootstrap_alloc().  Round up
781 	 * `virtual_avail' to the nearest page, and set the flag
782 	 * to prevent use of pmap_bootstrap_alloc() hereafter.
783 	 */
784 	pmap_bootstrap_aalign(NBPG);
785 	bootstrap_alloc_enabled = FALSE;
786 
787 	/*
788 	 * Now that we are done with pmap_bootstrap_alloc(), we
789 	 * must save the virtual and physical addresses of the
790 	 * end of the linearly mapped range, which are stored in
791 	 * virtual_contig_end and avail_start, respectively.
792 	 * These variables will never change after this point.
793 	 */
794 	virtual_contig_end = virtual_avail;
795 	avail_start = virtual_avail - KERNBASE;
796 
797 	/*
798 	 * `avail_next' is a running pointer used by pmap_next_page() to
799 	 * keep track of the next available physical page to be handed
800 	 * to the VM system during its initialization, in which it
801 	 * asks for physical pages, one at a time.
802 	 */
803 	avail_next = avail_start;
804 
805 	/*
806 	 * Now allocate some virtual addresses, but not the physical pages
807 	 * behind them.  Note that virtual_avail is already page-aligned.
808 	 *
809 	 * tmp_vpages[] is an array of two virtual pages used for temporary
810 	 * kernel mappings in the pmap module to facilitate various physical
811 	 * address-oriented operations.
812 	 */
813 	tmp_vpages[0] = virtual_avail;
814 	virtual_avail += NBPG;
815 	tmp_vpages[1] = virtual_avail;
816 	virtual_avail += NBPG;
817 
818 	/** Initialize the PV system **/
819 	pmap_init_pv();
820 
821 	/*
822 	 * Fill in the kernel_pmap structure and kernel_crp.
823 	 */
824 	kernAphys = mmu_vtop(kernAbase);
825 	kernel_pmap.pm_a_tmgr = NULL;
826 	kernel_pmap.pm_a_phys = kernAphys;
827 	kernel_pmap.pm_refcount = 1; /* always in use */
828 
829 	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
830 	kernel_crp.rp_addr = kernAphys;
831 
832 	/*
833 	 * Now pmap_enter_kernel() may be used safely and will be
834 	 * the main interface used hereafter to modify the kernel's
835 	 * virtual address space.  Note that since we are still running
836 	 * under the PROM's address table, none of these table modifications
837 	 * actually take effect until pmap_takeover_mmu() is called.
838 	 *
839 	 * Note: Our tables do NOT have the PROM linear mappings!
840 	 * Only the mappings created here exist in our tables, so
841 	 * remember to map anything we expect to use.
842 	 */
843 	va = (vm_offset_t) KERNBASE;
844 	pa = 0;
845 
846 	/*
847 	 * The first page of the kernel virtual address space is the msgbuf
848 	 * page.  The page attributes (data, non-cached) are set here, while
849 	 * the address is assigned to this global pointer in cpu_startup().
850 	 * It is non-cached, mostly due to paranoia.
851 	 */
852 	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
853 	va += NBPG; pa += NBPG;
854 
855 	/* Next page is used as the temporary stack. */
856 	pmap_enter_kernel(va, pa, VM_PROT_ALL);
857 	va += NBPG; pa += NBPG;
858 
859 	/*
860 	 * Map all of the kernel's text segment as read-only and cacheable.
861 	 * (Cacheable is implied by default).  Unfortunately, the last bytes
862 	 * of kernel text and the first bytes of kernel data will often be
863 	 * sharing the same page.  Therefore, the last page of kernel text
864 	 * has to be mapped as read/write, to accommodate the data.
865 	 */
866 	eva = m68k_trunc_page((vm_offset_t)etext);
867 	for (; va < eva; va += NBPG, pa += NBPG)
868 		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);
869 
870 	/*
871 	 * Map all of the kernel's data as read/write and cacheable.
872 	 * This includes: data, BSS, symbols, and everything in the
873 	 * contiguous memory used by pmap_bootstrap_alloc()
874 	 */
875 	for (; pa < avail_start; va += NBPG, pa += NBPG)
876 		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);
877 
878 	/*
879 	 * At this point we are almost ready to take over the MMU.  But first
880 	 * we must save the PROM's address space in our map, as we call its
881 	 * routines and make references to its data later in the kernel.
882 	 */
883 	pmap_bootstrap_copyprom();
884 	pmap_takeover_mmu();
885 	pmap_bootstrap_setprom();
886 
887 	/* Notify the VM system of our page size. */
888 	PAGE_SIZE = NBPG;
889 	uvm_setpagesize();
890 
891 	pmap_page_upload();
892 }
893 
894 
895 /* pmap_alloc_usermmu			INTERNAL
896  **
897  * Called from pmap_bootstrap() to allocate MMU tables that will
898  * eventually be used for user mappings.
899  */
900 void
901 pmap_alloc_usermmu()
902 {
903 	/* XXX: Moved into caller. */
904 }
905 
906 /* pmap_alloc_pv			INTERNAL
907  **
908  * Called from pmap_bootstrap() to allocate the physical
909  * to virtual mapping list.  Each physical page of memory
910  * in the system has a corresponding element in this list.
911  */
912 void
913 pmap_alloc_pv()
914 {
915 	int	i;
916 	unsigned int	total_mem;
917 
918 	/*
919 	 * Allocate a pv_head structure for every page of physical
920 	 * memory that will be managed by the system.  Since memory on
921 	 * the 3/80 is non-contiguous, we cannot arrive at a total page
922 	 * count by subtraction of the lowest available address from the
923 	 * highest, but rather we have to step through each memory
924 	 * bank and add the number of pages in each to the total.
925 	 *
926 	 * At this time we also initialize the offset of each bank's
927 	 * starting pv_head within the pv_head list so that the physical
928 	 * memory state routines (pmap_is_referenced(),
929 	 * pmap_is_modified(), et al.) can quickly find corresponding
930 	 * pv_heads in spite of the non-contiguity.
931 	 */
932 	total_mem = 0;
933 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
934 		avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
935 		total_mem += avail_mem[i].pmem_end -
936 			avail_mem[i].pmem_start;
937 		if (avail_mem[i].pmem_next == NULL)
938 			break;
939 	}
940 	pvbase = (pv_t *) pmap_bootstrap_alloc(sizeof(pv_t) *
941 		m68k_btop(total_phys_mem));
942 }
943 
944 /* pmap_alloc_usertmgr			INTERNAL
945  **
946  * Called from pmap_bootstrap() to allocate the structures which
947  * facilitate management of user MMU tables.  Each user MMU table
948  * in the system has one such structure associated with it.
949  */
950 void
951 pmap_alloc_usertmgr()
952 {
953 	/* Allocate user MMU table managers */
954 	/* It would be a lot simpler to just make these BSS, but */
955 	/* we may want to change their size at boot time... -j */
956 	Atmgrbase = (a_tmgr_t *) pmap_bootstrap_alloc(sizeof(a_tmgr_t)
957 		* NUM_A_TABLES);
958 	Btmgrbase = (b_tmgr_t *) pmap_bootstrap_alloc(sizeof(b_tmgr_t)
959 		* NUM_B_TABLES);
960 	Ctmgrbase = (c_tmgr_t *) pmap_bootstrap_alloc(sizeof(c_tmgr_t)
961 		* NUM_C_TABLES);
962 
963 	/*
964 	 * Allocate PV list elements for the physical to virtual
965 	 * mapping system.
966 	 */
967 	pvebase = (pv_elem_t *) pmap_bootstrap_alloc(
968 		sizeof(pv_elem_t) * (NUM_USER_PTES + NUM_KERN_PTES));
969 }
970 
971 /* pmap_bootstrap_copyprom()			INTERNAL
972  **
973  * Copy the PROM mappings into our own tables.  Note, we
974  * can use physical addresses until __bootstrap returns.
975  */
976 void
977 pmap_bootstrap_copyprom()
978 {
979 	struct sunromvec *romp;
980 	int *mon_ctbl;
981 	mmu_short_pte_t *kpte;
982 	int i, len;
983 
984 	romp = romVectorPtr;
985 
986 	/*
987 	 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
988 	 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
989 	 */
990 	mon_ctbl = *romp->monptaddr;
991 	i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE);
992 	kpte = &kernCbase[i];
993 	len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);
994 
995 	for (i = 0; i < len; i++) {
996 		kpte[i].attr.raw = mon_ctbl[i];
997 	}
998 
999 	/*
1000 	 * Copy the mappings at MON_DVMA_BASE (to the end).
1001 	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
1002 	 * Actually, we only want the last page, which the
1003 	 * PROM has set up for use by the "ie" driver.
1004 	 * (The i82586 needs its SCP there.)
1005 	 * If we copy all the mappings, pmap_enter_kernel
1006 	 * may complain about finding valid PTEs that are
1007 	 * not recorded in our PV lists...
1008 	 */
1009 	mon_ctbl = *romp->shadowpteaddr;
1010 	i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
1011 	kpte = &kernCbase[i];
1012 	len = m68k_btop(SUN3X_MON_DVMA_SIZE);
1013 	for (i = (len-1); i < len; i++) {
1014 		kpte[i].attr.raw = mon_ctbl[i];
1015 	}
1016 }
1017 
1018 /* pmap_takeover_mmu			INTERNAL
1019  **
1020  * Called from pmap_bootstrap() after it has copied enough of the
1021  * PROM mappings into the kernel map so that we can use our own
1022  * MMU table.
1023  */
1024 void
1025 pmap_takeover_mmu()
1026 {
1027 
1028 	loadcrp(&kernel_crp);
1029 }
1030 
1031 /* pmap_bootstrap_setprom()			INTERNAL
1032  **
1033  * Set the PROM mappings so it can see kernel space.
1034  * Note that physical addresses are used here, which
1035  * we can get away with because this runs with the
1036  * low 1GB set for transparent translation.
1037  */
1038 void
1039 pmap_bootstrap_setprom()
1040 {
1041 	mmu_long_dte_t *mon_dte;
1042 	extern struct mmu_rootptr mon_crp;
1043 	int i;
1044 
1045 	mon_dte = (mmu_long_dte_t *) mon_crp.rp_addr;
1046 	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
1047 		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
1048 		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
1049 	}
1050 }
1051 
1052 
1053 /* pmap_init			INTERFACE
1054  **
1055  * Called at the end of vm_init() to set up the pmap system to go
1056  * into full time operation.  All initialization of kernel_pmap
1057  * should be already done by now, so this should just do things
1058  * needed for user-level pmaps to work.
1059  */
1060 void
1061 pmap_init()
1062 {
1063 	/** Initialize the manager pools **/
1064 	TAILQ_INIT(&a_pool);
1065 	TAILQ_INIT(&b_pool);
1066 	TAILQ_INIT(&c_pool);
1067 
1068 	/**************************************************************
1069 	 * Initialize all tmgr structures and MMU tables they manage. *
1070 	 **************************************************************/
1071 	/** Initialize A tables **/
1072 	pmap_init_a_tables();
1073 	/** Initialize B tables **/
1074 	pmap_init_b_tables();
1075 	/** Initialize C tables **/
1076 	pmap_init_c_tables();
1077 }
1078 
1079 /* pmap_init_a_tables()			INTERNAL
1080  **
1081  * Initializes all A managers, their MMU A tables, and inserts
1082  * them into the A manager pool for use by the system.
1083  */
1084 void
1085 pmap_init_a_tables()
1086 {
1087 	int i;
1088 	a_tmgr_t *a_tbl;
1089 
1090 	for (i=0; i < NUM_A_TABLES; i++) {
1091 		/* Select the next available A manager from the pool */
1092 		a_tbl = &Atmgrbase[i];
1093 
1094 		/*
1095 		 * Clear its parent entry.  Set its wired and valid
1096 		 * entry count to zero.
1097 		 */
1098 		a_tbl->at_parent = NULL;
1099 		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;
1100 
1101 		/* Assign it the next available MMU A table from the pool */
1102 		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];
1103 
1104 		/*
1105 		 * Initialize the MMU A table with the table in the `proc0',
1106 		 * or kernel, mapping.  This ensures that every process has
1107 		 * the kernel mapped in the top part of its address space.
1108 		 */
1109 		bcopy(kernAbase, a_tbl->at_dtbl, MMU_A_TBL_SIZE *
1110 			sizeof(mmu_long_dte_t));
1111 
1112 		/*
1113 		 * Finally, insert the manager into the A pool,
1114 		 * making it ready to be used by the system.
1115 		 */
1116 		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
1117     }
1118 }
1119 
1120 /* pmap_init_b_tables()			INTERNAL
1121  **
1122  * Initializes all B table managers, their MMU B tables, and
1123  * inserts them into the B manager pool for use by the system.
1124  */
1125 void
1126 pmap_init_b_tables()
1127 {
1128 	int i,j;
1129 	b_tmgr_t *b_tbl;
1130 
1131 	for (i=0; i < NUM_B_TABLES; i++) {
1132 		/* Select the next available B manager from the pool */
1133 		b_tbl = &Btmgrbase[i];
1134 
1135 		b_tbl->bt_parent = NULL;	/* clear its parent,  */
1136 		b_tbl->bt_pidx = 0;		/* parent index,      */
1137 		b_tbl->bt_wcnt = 0;		/* wired entry count, */
1138 		b_tbl->bt_ecnt = 0;		/* valid entry count. */
1139 
1140 		/* Assign it the next available MMU B table from the pool */
1141 		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];
1142 
1143 		/* Invalidate every descriptor in the table */
1144 		for (j=0; j < MMU_B_TBL_SIZE; j++)
1145 			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;
1146 
1147 		/* Insert the manager into the B pool */
1148 		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
1149 	}
1150 }
1151 
1152 /* pmap_init_c_tables()			INTERNAL
1153  **
1154  * Initializes all C table managers, their MMU C tables, and
1155  * inserts them into the C manager pool for use by the system.
1156  */
1157 void
1158 pmap_init_c_tables()
1159 {
1160 	int i,j;
1161 	c_tmgr_t *c_tbl;
1162 
1163 	for (i=0; i < NUM_C_TABLES; i++) {
1164 		/* Select the next available C manager from the pool */
1165 		c_tbl = &Ctmgrbase[i];
1166 
1167 		c_tbl->ct_parent = NULL;	/* clear its parent,  */
1168 		c_tbl->ct_pidx = 0;		/* parent index,      */
1169 		c_tbl->ct_wcnt = 0;		/* wired entry count, */
1170 		c_tbl->ct_ecnt = 0;		/* valid entry count, */
1171 		c_tbl->ct_pmap = NULL;		/* parent pmap,       */
1172 		c_tbl->ct_va = 0;		/* base of managed range */
1173 
1174 		/* Assign it the next available MMU C table from the pool */
1175 		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];
1176 
1177 		for (j=0; j < MMU_C_TBL_SIZE; j++)
1178 			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;
1179 
1180 		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
1181 	}
1182 }
1183 
1184 /* pmap_init_pv()			INTERNAL
1185  **
1186  * Initializes the Physical to Virtual mapping system.
1187  */
1188 void
1189 pmap_init_pv()
1190 {
1191 	int	i;
1192 
1193 	/* Initialize every PV head. */
1194 	for (i = 0; i < m68k_btop(total_phys_mem); i++) {
1195 		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
1196 		pvbase[i].pv_flags = 0;		/* Zero out page flags  */
1197 	}
1198 
1199 	pv_initialized = TRUE;
1200 }
1201 
1202 /* get_a_table			INTERNAL
1203  **
1204  * Retrieve and return a level A table for use in a user map.
1205  */
1206 a_tmgr_t *
1207 get_a_table()
1208 {
1209 	a_tmgr_t *tbl;
1210 	pmap_t pmap;
1211 
1212 	/* Get the top A table in the pool */
1213 	tbl = a_pool.tqh_first;
1214 	if (tbl == NULL) {
1215 		/*
1216 		 * XXX - Instead of panicking here and in other get_x_table
1217 		 * functions, we do have the option of sleeping on the head of
1218 		 * the table pool.  Any function which updates the table pool
1219 		 * would then issue a wakeup() on the head, thus waking up any
1220 		 * processes waiting for a table.
1221 		 *
1222 		 * Actually, the place to sleep would be when some process
1223 		 * asks for a "wired" mapping that would run us short of
1224 		 * mapping resources.  This design DEPENDS on always having
1225 		 * some mapping resources in the pool for stealing, so we
1226 		 * must make sure we NEVER let the pool become empty. -gwr
1227 		 */
1228 		panic("get_a_table: out of A tables.");
1229 	}
1230 
1231 	TAILQ_REMOVE(&a_pool, tbl, at_link);
1232 	/*
1233 	 * If the table has a non-null parent pointer then it is in use.
1234 	 * Forcibly abduct it from its parent and clear its entries.
1235 	 * No re-entrancy worries here.  This table would not be in the
1236 	 * table pool unless it was available for use.
1237 	 *
1238 	 * Note that the second argument to free_a_table() is FALSE.  This
1239 	 * indicates that the table should not be relinked into the A table
1240 	 * pool.  That is a job for the function that called us.
1241 	 */
1242 	if (tbl->at_parent) {
1243 		pmap = tbl->at_parent;
1244 		free_a_table(tbl, FALSE);
1245 		pmap->pm_a_tmgr = NULL;
1246 		pmap->pm_a_phys = kernAphys;
1247 	}
1248 #ifdef  NON_REENTRANT
1249 	/*
1250 	 * If the table isn't to be wired down, re-insert it at the
1251 	 * end of the pool.
1252 	 */
1253 	if (!wired)
1254 		/*
1255 		 * Quandary - XXX
1256 		 * Would it be better to let the calling function insert this
1257 		 * table into the queue?  By inserting it here, we are allowing
1258 		 * it to be stolen immediately.  The calling function is
1259 		 * probably not expecting to use a table that it is not
1260 		 * assured full control of.
1261 		 * Answer - In the interest of re-entrancy, it is best to let
1262 		 * the calling function determine when a table is available
1263 		 * for use.  Therefore this code block is not used.
1264 		 */
1265 		TAILQ_INSERT_TAIL(&a_pool, tbl, at_link);
1266 #endif	/* NON_REENTRANT */
1267 	return tbl;
1268 }
1269 
1270 /* get_b_table			INTERNAL
1271  **
1272  * Return a level B table for use.
1273  */
1274 b_tmgr_t *
1275 get_b_table()
1276 {
1277 	b_tmgr_t *tbl;
1278 
1279 	/* See 'get_a_table' for comments. */
1280 	tbl = b_pool.tqh_first;
1281 	if (tbl == NULL)
1282 		panic("get_b_table: out of B tables.");
1283 	TAILQ_REMOVE(&b_pool, tbl, bt_link);
1284 	if (tbl->bt_parent) {
1285 		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
1286 		tbl->bt_parent->at_ecnt--;
1287 		free_b_table(tbl, FALSE);
1288 	}
1289 #ifdef	NON_REENTRANT
1290 	if (!wired)
1291 		/* XXX see quandary in get_a_table */
1292 		/* XXX start lock */
1293 		TAILQ_INSERT_TAIL(&b_pool, tbl, bt_link);
1294 		/* XXX end lock */
1295 #endif	/* NON_REENTRANT */
1296 	return tbl;
1297 }
1298 
1299 /* get_c_table			INTERNAL
1300  **
1301  * Return a level C table for use.
1302  */
1303 c_tmgr_t *
1304 get_c_table()
1305 {
1306 	c_tmgr_t *tbl;
1307 
1308 	/* See 'get_a_table' for comments */
1309 	tbl = c_pool.tqh_first;
1310 	if (tbl == NULL)
1311 		panic("get_c_table: out of C tables.");
1312 	TAILQ_REMOVE(&c_pool, tbl, ct_link);
1313 	if (tbl->ct_parent) {
1314 		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
1315 		tbl->ct_parent->bt_ecnt--;
1316 		free_c_table(tbl, FALSE);
1317 	}
1318 #ifdef	NON_REENTRANT
1319 	if (!wired)
1320 		/* XXX See quandary in get_a_table */
1321 		/* XXX start lock */
1322 		TAILQ_INSERT_TAIL(&c_pool, tbl, ct_link);
1323 		/* XXX end lock */
1324 #endif	/* NON_REENTRANT */
1325 
1326 	return tbl;
1327 }
1328 
1329 /*
1330  * The following 'free_table' and 'steal_table' functions are called to
1331  * detach tables from their current obligations (parents and children) and
1332  * prepare them for reuse in another mapping.
1333  *
1334  * Free_table is used when the calling function will handle the fate
1335  * of the parent table, such as returning it to the free pool when it has
1336  * no valid entries.  Functions that do not want to handle this should
1337  * call steal_table, in which the parent table's descriptors and entry
1338  * count are automatically modified when this table is removed.
1339  */
1340 
1341 /* free_a_table			INTERNAL
1342  **
1343  * Unmaps the given A table and all child tables from their current
1344  * mappings.  Returns the number of pages that were invalidated.
1345  * If 'relink' is true, the function will return the table to the head
1346  * of the available table pool.
1347  *
1348  * Cache note: The MC68851 will automatically flush all
1349  * descriptors derived from a given A table from its
1350  * Automatic Translation Cache (ATC) if we issue a
1351  * 'PFLUSHR' instruction with the base address of the
1352  * table.  This function should do so, and does.
1353  * Note note: We are using an MC68030 - there is no
1354  * PFLUSHR.
1355  */
1356 int
1357 free_a_table(a_tbl, relink)
1358 	a_tmgr_t *a_tbl;
1359 	boolean_t relink;
1360 {
1361 	int i, removed_cnt;
1362 	mmu_long_dte_t	*dte;
1363 	mmu_short_dte_t *dtbl;
1364 	b_tmgr_t	*tmgr;
1365 
1366 	/*
1367 	 * Flush the ATC cache of all cached descriptors derived
1368 	 * from this table.
1369 	 * Sun3x does not use 68851's cached table feature
1370 	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
1371 	 */
1372 
1373 	/*
1374 	 * Remove any pending cache flushes that were designated
1375 	 * for the pmap this A table belongs to.
1376 	 * a_tbl->parent->atc_flushq[0] = 0;
1377 	 * Not implemented in sun3x.
1378 	 */
1379 
1380 	/*
1381 	 * All A tables in the system should retain a map for the
1382 	 * kernel. If the table contains any valid descriptors
1383 	 * (other than those for the kernel area), invalidate them all,
1384 	 * stopping short of the kernel's entries.
1385 	 */
1386 	removed_cnt = 0;
1387 	if (a_tbl->at_ecnt) {
1388 		dte = a_tbl->at_dtbl;
1389 		for (i=0; i < MMU_TIA(KERNBASE); i++) {
1390 			/*
1391 			 * If a table entry points to a valid B table, free
1392 			 * it and its children.
1393 			 */
1394 			if (MMU_VALID_DT(dte[i])) {
1395 				/*
1396 				 * The following block does several things,
1397 				 * from innermost expression to the
1398 				 * outermost:
1399 				 * 1) It extracts the base
1400 				 *    address of the B table pointed
1401 				 *    to in the A table entry dte[i].
1402 				 * 2) It converts this base address into
1403 				 *    the virtual address it can be
1404 				 *    accessed with. (all MMU tables point
1405 				 *    to physical addresses.)
1406 				 * 3) It finds the corresponding manager
1407 				 *    structure which manages this MMU table.
1408 				 * 4) It frees the manager structure.
1409 				 *    (This frees the MMU table and all
1410 				 *    child tables. See 'free_b_table' for
1411 				 *    details.)
1412 				 */
1413 				dtbl = mmu_ptov(dte[i].addr.raw);
1414 				tmgr = mmuB2tmgr(dtbl);
1415 				removed_cnt += free_b_table(tmgr, TRUE);
1416 				dte[i].attr.raw = MMU_DT_INVALID;
1417 			}
1418 		}
1419 		a_tbl->at_ecnt = 0;
1420 	}
1421 	if (relink) {
1422 		a_tbl->at_parent = NULL;
1423 		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1424 		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
1425 	}
1426 	return removed_cnt;
1427 }
1428 
1429 /* free_b_table			INTERNAL
1430  **
1431  * Unmaps the given B table and all its children from their current
1432  * mappings.  Returns the number of pages that were invalidated.
1433  * (For comments, see 'free_a_table()').
1434  */
1435 int
1436 free_b_table(b_tbl, relink)
1437 	b_tmgr_t *b_tbl;
1438 	boolean_t relink;
1439 {
1440 	int i, removed_cnt;
1441 	mmu_short_dte_t *dte;
1442 	mmu_short_pte_t	*dtbl;
1443 	c_tmgr_t	*tmgr;
1444 
1445 	removed_cnt = 0;
1446 	if (b_tbl->bt_ecnt) {
1447 		dte = b_tbl->bt_dtbl;
1448 		for (i=0; i < MMU_B_TBL_SIZE; i++) {
1449 			if (MMU_VALID_DT(dte[i])) {
1450 				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
1451 				tmgr = mmuC2tmgr(dtbl);
1452 				removed_cnt += free_c_table(tmgr, TRUE);
1453 				dte[i].attr.raw = MMU_DT_INVALID;
1454 			}
1455 		}
1456 		b_tbl->bt_ecnt = 0;
1457 	}
1458 
1459 	if (relink) {
1460 		b_tbl->bt_parent = NULL;
1461 		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1462 		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
1463 	}
1464 	return removed_cnt;
1465 }
1466 
1467 /* free_c_table			INTERNAL
1468  **
1469  * Unmaps the given C table from use and returns it to the pool for
1470  * re-use.  Returns the number of pages that were invalidated.
1471  *
1472  * This function preserves any physical page modification information
1473  * contained in the page descriptors within the C table by calling
1474  * 'pmap_remove_pte().'
1475  */
1476 int
1477 free_c_table(c_tbl, relink)
1478 	c_tmgr_t *c_tbl;
1479 	boolean_t relink;
1480 {
1481 	int i, removed_cnt;
1482 
1483 	removed_cnt = 0;
1484 	if (c_tbl->ct_ecnt) {
1485 		for (i=0; i < MMU_C_TBL_SIZE; i++) {
1486 			if (MMU_VALID_DT(c_tbl->ct_dtbl[i])) {
1487 				pmap_remove_pte(&c_tbl->ct_dtbl[i]);
1488 				removed_cnt++;
1489 			}
1490 		}
1491 		c_tbl->ct_ecnt = 0;
1492 	}
1493 
1494 	if (relink) {
1495 		c_tbl->ct_parent = NULL;
1496 		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1497 		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
1498 	}
1499 	return removed_cnt;
1500 }
1501 
1502 #if 0
1503 /* free_c_table_novalid			INTERNAL
1504  **
1505  * Frees the given C table manager without checking to see whether
1506  * or not it contains any valid page descriptors as it is assumed
1507  * that it does not.
1508  */
1509 void
1510 free_c_table_novalid(c_tbl)
1511 	c_tmgr_t *c_tbl;
1512 {
1513 	TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1514 	TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
1515 	c_tbl->ct_parent->bt_dtbl[c_tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
1516 	c_tbl->ct_parent->bt_ecnt--;
1517 	/*
1518 	 * XXX - Should call equiv. of 'free_b_table_novalid' here if
1519 	 * we just removed the last entry of the parent B table.
1520 	 * But I want to ensure that this will not endanger pmap_enter()
1521 	 * with sudden removal of tables it is working with.
1522 	 *
1523 	 * We should probably add another field to each table, indicating
1524 	 * whether or not it is 'locked', ie. in the process of being
1525 	 * modified.
1526 	 */
1527 	c_tbl->ct_parent = NULL;
1528 }
1529 #endif
1530 
1531 /* pmap_remove_pte			INTERNAL
1532  **
1533  * Unmap the given pte and preserve any page modification
1534  * information by transferring it to the pv head of the
1535  * physical page it maps to.  This function does not update
1536  * any reference counts because it is assumed that the calling
1537  * function will do so.
1538  */
1539 void
1540 pmap_remove_pte(pte)
1541 	mmu_short_pte_t *pte;
1542 {
1543 	u_short     pv_idx, targ_idx;
1544 	int         s;
1545 	vm_offset_t pa;
1546 	pv_t       *pv;
1547 
1548 	pa = MMU_PTE_PA(*pte);
1549 	if (is_managed(pa)) {
1550 		pv = pa2pv(pa);
1551 		targ_idx = pteidx(pte);	/* Index of PTE being removed    */
1552 
1553 		/*
1554 		 * If the PTE being removed is the first (or only) PTE in
1555 		 * the list of PTEs currently mapped to this page, remove the
1556 		 * PTE by changing the index found on the PV head.  Otherwise
1557 		 * a linear search through the list will have to be executed
1558 		 * in order to find the PVE which points to the PTE being
1559 		 * removed, so that it may be modified to point to its new
1560 		 * neighbor.
1561 		 */
1562 		s = splimp();
1563 		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
1564 		if (pv_idx == targ_idx) {
1565 			pv->pv_idx = pvebase[targ_idx].pve_next;
1566 		} else {
1567 			/*
1568 			 * Find the PV element pointing to the target
1569 			 * element.  Note: may have pv_idx==PVE_EOL
1570 			 */
1571 			for (;;) {
1572 				if (pv_idx == PVE_EOL) {
1573 #ifdef	PMAP_DEBUG
1574 					printf("pmap_remove_pte: PVE_EOL\n");
1575 					Debugger();
1576 #endif
1577 					goto pv_not_found;
1578 				}
1579 				if (pvebase[pv_idx].pve_next == targ_idx)
1580 					break;
1581 				pv_idx = pvebase[pv_idx].pve_next;
1582 			}
1583 			/*
1584 			 * At this point, pv_idx is the index of the PV
1585 			 * element just before the target element in the list.
1586 			 * Unlink the target.
1587 			 */
1588 			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
		pv_not_found:
			;
1590 		}
1591 		/*
1592 		 * Save the mod/ref bits of the pte by simply
1593 		 * ORing the entire pte onto the pv_flags member
1594 		 * of the pv structure.
		 * There is no need for the pv head to use a bit pattern
		 * for usage information different from the one used
		 * in the MMU PTEs.
1598 		 */
1599 		pv->pv_flags |= (u_short) pte->attr.raw;
1600 		splx(s);
1601 	}
1602 
1603 	pte->attr.raw = MMU_DT_INVALID;
1604 }
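
/*
 * Illustrative sketch (not compiled): the PV list for a managed page is a
 * singly linked list of PTE indices, headed by pv->pv_idx and chained
 * through pvebase[idx].pve_next until PVE_EOL, which is what the unlink
 * code above walks.  The hypothetical helper below shows how such a list
 * would be searched for a given PTE index; it relies only on the pv
 * declarations found earlier in this file.
 */
#if 0
boolean_t
pv_list_contains(pa, targ_idx)
	vm_offset_t pa;
	u_short targ_idx;
{
	pv_t *pv;
	int idx;

	if (!is_managed(pa))
		return FALSE;

	pv = pa2pv(pa);
	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next)
		if (idx == targ_idx)
			return TRUE;
	return FALSE;
}
#endif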
1605 
1606 /* pmap_stroll			INTERNAL
1607  **
1608  * Retrieve the addresses of all table managers involved in the mapping of
 * the given virtual address.  If the table walk completed successfully,
 * return TRUE.  If it was only partially successful, return FALSE.
1611  * The table walk performed by this function is important to many other
1612  * functions in this module.
1613  *
1614  * Note: This function ought to be easier to read.
1615  */
1616 boolean_t
1617 pmap_stroll(pmap, va, a_tbl, b_tbl, c_tbl, pte, a_idx, b_idx, pte_idx)
1618 	pmap_t pmap;
1619 	vm_offset_t va;
1620 	a_tmgr_t **a_tbl;
1621 	b_tmgr_t **b_tbl;
1622 	c_tmgr_t **c_tbl;
1623 	mmu_short_pte_t **pte;
1624 	int *a_idx, *b_idx, *pte_idx;
1625 {
1626 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
1627 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
1628 
1629 	if (pmap == pmap_kernel())
1630 		return FALSE;
1631 
1632 	/* Does the given pmap have its own A table? */
1633 	*a_tbl = pmap->pm_a_tmgr;
1634 	if (*a_tbl == NULL)
1635 		return FALSE; /* No.  Return unknown. */
1636 	/* Does the A table have a valid B table
1637 	 * under the corresponding table entry?
1638 	 */
1639 	*a_idx = MMU_TIA(va);
1640 	a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
1641 	if (!MMU_VALID_DT(*a_dte))
1642 		return FALSE; /* No. Return unknown. */
1643 	/* Yes. Extract B table from the A table. */
1644 	*b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
1645 	/* Does the B table have a valid C table
1646 	 * under the corresponding table entry?
1647 	 */
1648 	*b_idx = MMU_TIB(va);
1649 	b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
1650 	if (!MMU_VALID_DT(*b_dte))
1651 		return FALSE; /* No. Return unknown. */
1652 	/* Yes. Extract C table from the B table. */
1653 	*c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
1654 	*pte_idx = MMU_TIC(va);
1655 	*pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
1656 
1657 	return	TRUE;
1658 }
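
/*
 * Illustrative sketch (not compiled): a typical caller uses pmap_stroll()
 * to locate the PTE for a virtual address before examining it, much as
 * pmap_unwire() and pmap_extract() do below.  The returned indices are
 * simply the A/B/C index fields of the VA, i.e. MMU_TIA(va), MMU_TIB(va)
 * and MMU_TIC(va).  The helper name below is hypothetical.
 */
#if 0
boolean_t
pmap_has_valid_mapping(pmap, va)
	pmap_t pmap;
	vm_offset_t va;
{
	a_tmgr_t *a_tbl;
	b_tmgr_t *b_tbl;
	c_tmgr_t *c_tbl;
	mmu_short_pte_t *pte;
	int a_idx, b_idx, pte_idx;

	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
		&a_idx, &b_idx, &pte_idx) == FALSE)
		return FALSE;
	return MMU_VALID_DT(*pte) ? TRUE : FALSE;
}
#endif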
1659 
1660 /* pmap_enter			INTERFACE
1661  **
1662  * Called by the kernel to map a virtual address
1663  * to a physical address in the given process map.
1664  *
1665  * Note: this function should apply an exclusive lock
1666  * on the pmap system for its duration.  (it certainly
1667  * would save my hair!!)
1668  * This function ought to be easier to read.
1669  */
1670 int
1671 pmap_enter(pmap, va, pa, prot, flags)
1672 	pmap_t	pmap;
1673 	vm_offset_t va;
1674 	vm_offset_t pa;
1675 	vm_prot_t prot;
1676 	int flags;
1677 {
1678 	boolean_t insert, managed; /* Marks the need for PV insertion.*/
1679 	u_short nidx;            /* PV list index                     */
1680 	int s;                   /* Used for splimp()/splx()          */
1681 	int mapflags;            /* Flags for the mapping (see NOTE1) */
1682 	u_int a_idx, b_idx, pte_idx; /* table indices                 */
1683 	a_tmgr_t *a_tbl;         /* A: long descriptor table manager  */
1684 	b_tmgr_t *b_tbl;         /* B: short descriptor table manager */
1685 	c_tmgr_t *c_tbl;         /* C: short page table manager       */
1686 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
1687 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
1688 	mmu_short_pte_t *c_pte;  /* C: short page descriptor table    */
1689 	pv_t      *pv;           /* pv list head                      */
1690 	boolean_t wired;         /* is the mapping to be wired?       */
1691 	enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end   */
1692 
1693 	if (pmap == NULL)
1694 		return (KERN_SUCCESS);
1695 	if (pmap == pmap_kernel()) {
1696 		pmap_enter_kernel(va, pa, prot);
1697 		return (KERN_SUCCESS);
1698 	}
1699 
1700 	/*
1701 	 * Determine if the mapping should be wired.
1702 	 */
1703 	wired = ((flags & PMAP_WIRED) != 0);
1704 
1705 	/*
1706 	 * NOTE1:
1707 	 *
1708 	 * On November 13, 1999, someone changed the pmap_enter() API such
1709 	 * that it now accepts a 'flags' argument.  This new argument
1710 	 * contains bit-flags for the architecture-independent (UVM) system to
1711 	 * use in signalling certain mapping requirements to the architecture-
1712 	 * dependent (pmap) system.  The argument it replaces, 'wired', is now
1713 	 * one of the flags within it.
1714 	 *
1715 	 * In addition to flags signaled by the architecture-independent
1716 	 * system, parts of the architecture-dependent section of the sun3x
1717 	 * kernel pass their own flags in the lower, unused bits of the
1718 	 * physical address supplied to this function.  These flags are
1719 	 * extracted and stored in the temporary variable 'mapflags'.
1720 	 *
1721 	 * Extract sun3x specific flags from the physical address.
1722 	 */
1723 	mapflags  = (pa & ~MMU_PAGE_MASK);
1724 	pa       &= MMU_PAGE_MASK;
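
	/*
	 * Illustrative note (not an actual call site): a sun3x caller that
	 * wants a cache-inhibited mapping can OR PMAP_NC into those low,
	 * unused bits, e.g.
	 *
	 *	pmap_enter(pmap, va, pa | PMAP_NC, prot, flags);
	 *
	 * The flag is stripped from 'pa' above and re-applied to the PTE
	 * as MMU_SHORT_PTE_CI further below.
	 */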
1725 
1726 	/*
1727 	 * Determine if the physical address being mapped is on-board RAM.
1728 	 * Any other area of the address space is likely to belong to a
	 * device and hence it would be disastrous to cache its contents.
1730 	 */
1731 	if ((managed = is_managed(pa)) == FALSE)
1732 		mapflags |= PMAP_NC;
1733 
1734 	/*
1735 	 * For user mappings we walk along the MMU tables of the given
1736 	 * pmap, reaching a PTE which describes the virtual page being
1737 	 * mapped or changed.  If any level of the walk ends in an invalid
1738 	 * entry, a table must be allocated and the entry must be updated
1739 	 * to point to it.
1740 	 * There is a bit of confusion as to whether this code must be
1741 	 * re-entrant.  For now we will assume it is.  To support
1742 	 * re-entrancy we must unlink tables from the table pool before
1743 	 * we assume we may use them.  Tables are re-linked into the pool
1744 	 * when we are finished with them at the end of the function.
1745 	 * But I don't feel like doing that until we have proof that this
1746 	 * needs to be re-entrant.
1747 	 * 'llevel' records which tables need to be relinked.
1748 	 */
1749 	llevel = NONE;
1750 
1751 	/*
1752 	 * Step 1 - Retrieve the A table from the pmap.  If it has no
1753 	 * A table, allocate a new one from the available pool.
1754 	 */
1755 
1756 	a_tbl = pmap->pm_a_tmgr;
1757 	if (a_tbl == NULL) {
1758 		/*
1759 		 * This pmap does not currently have an A table.  Allocate
1760 		 * a new one.
1761 		 */
1762 		a_tbl = get_a_table();
1763 		a_tbl->at_parent = pmap;
1764 
1765 		/*
1766 		 * Assign this new A table to the pmap, and calculate its
1767 		 * physical address so that loadcrp() can be used to make
1768 		 * the table active.
1769 		 */
1770 		pmap->pm_a_tmgr = a_tbl;
1771 		pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1772 
1773 		/*
1774 		 * If the process receiving a new A table is the current
1775 		 * process, we are responsible for setting the MMU so that
1776 		 * it becomes the current address space.  This only adds
1777 		 * new mappings, so no need to flush anything.
1778 		 */
1779 		if (pmap == current_pmap()) {
1780 			kernel_crp.rp_addr = pmap->pm_a_phys;
1781 			loadcrp(&kernel_crp);
1782 		}
1783 
1784 		if (!wired)
1785 			llevel = NEWA;
1786 	} else {
1787 		/*
1788 		 * Use the A table already allocated for this pmap.
1789 		 * Unlink it from the A table pool if necessary.
1790 		 */
1791 		if (wired && !a_tbl->at_wcnt)
1792 			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1793 	}
1794 
1795 	/*
1796 	 * Step 2 - Walk into the B table.  If there is no valid B table,
1797 	 * allocate one.
1798 	 */
1799 
1800 	a_idx = MMU_TIA(va);            /* Calculate the TIA of the VA. */
1801 	a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1802 	if (MMU_VALID_DT(*a_dte)) {     /* Is the descriptor valid? */
1803 		/* The descriptor is valid.  Use the B table it points to. */
1804 		/*************************************
1805 		 *               a_idx               *
1806 		 *                 v                 *
1807 		 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1808 		 *          | | | | | | | | | | | |  *
1809 		 *          +-+-+-+-+-+-+-+-+-+-+-+- *
1810 		 *                 |                 *
1811 		 *                 \- b_tbl -> +-+-  *
1812 		 *                             | |   *
1813 		 *                             +-+-  *
1814 		 *************************************/
1815 		b_dte = mmu_ptov(a_dte->addr.raw);
1816 		b_tbl = mmuB2tmgr(b_dte);
1817 
1818 		/*
1819 		 * If the requested mapping must be wired, but this table
1820 		 * being used to map it is not, the table must be removed
1821 		 * from the available pool and its wired entry count
1822 		 * incremented.
1823 		 */
1824 		if (wired && !b_tbl->bt_wcnt) {
1825 			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1826 			a_tbl->at_wcnt++;
1827 		}
1828 	} else {
1829 		/* The descriptor is invalid.  Allocate a new B table. */
1830 		b_tbl = get_b_table();
1831 
1832 		/* Point the parent A table descriptor to this new B table. */
1833 		a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1834 		a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1835 		a_tbl->at_ecnt++; /* Update parent's valid entry count */
1836 
1837 		/* Create the necessary back references to the parent table */
1838 		b_tbl->bt_parent = a_tbl;
1839 		b_tbl->bt_pidx = a_idx;
1840 
1841 		/*
1842 		 * If this table is to be wired, make sure the parent A table
1843 		 * wired count is updated to reflect that it has another wired
1844 		 * entry.
1845 		 */
1846 		if (wired)
1847 			a_tbl->at_wcnt++;
1848 		else if (llevel == NONE)
1849 			llevel = NEWB;
1850 	}
1851 
1852 	/*
1853 	 * Step 3 - Walk into the C table, if there is no valid C table,
1854 	 * allocate one.
1855 	 */
1856 
1857 	b_idx = MMU_TIB(va);            /* Calculate the TIB of the VA */
1858 	b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1859 	if (MMU_VALID_DT(*b_dte)) {     /* Is the descriptor valid? */
1860 		/* The descriptor is valid.  Use the C table it points to. */
1861 		/**************************************
		 *               b_idx                *
1863 		 * |                v                 *
1864 		 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1865 		 *             | | | | | | | | | | |  *
1866 		 *             +-+-+-+-+-+-+-+-+-+-+- *
1867 		 *                  |                 *
1868 		 *                  \- c_tbl -> +-+-- *
1869 		 *                              | | | *
1870 		 *                              +-+-- *
1871 		 **************************************/
1872 		c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1873 		c_tbl = mmuC2tmgr(c_pte);
1874 
1875 		/* If mapping is wired and table is not */
1876 		if (wired && !c_tbl->ct_wcnt) {
1877 			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1878 			b_tbl->bt_wcnt++;
1879 		}
1880 	} else {
1881 		/* The descriptor is invalid.  Allocate a new C table. */
1882 		c_tbl = get_c_table();
1883 
1884 		/* Point the parent B table descriptor to this new C table. */
1885 		b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1886 		b_dte->attr.raw |= MMU_DT_SHORT;
1887 		b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1888 
1889 		/* Create the necessary back references to the parent table */
1890 		c_tbl->ct_parent = b_tbl;
1891 		c_tbl->ct_pidx = b_idx;
1892 		/*
1893 		 * Store the pmap and base virtual managed address for faster
1894 		 * retrieval in the PV functions.
1895 		 */
1896 		c_tbl->ct_pmap = pmap;
1897 		c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1898 
1899 		/*
1900 		 * If this table is to be wired, make sure the parent B table
1901 		 * wired count is updated to reflect that it has another wired
1902 		 * entry.
1903 		 */
1904 		if (wired)
1905 			b_tbl->bt_wcnt++;
1906 		else if (llevel == NONE)
1907 			llevel = NEWC;
1908 	}
1909 
1910 	/*
1911 	 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1912 	 * slot of the C table, describing the PA to which the VA is mapped.
1913 	 */
1914 
1915 	pte_idx = MMU_TIC(va);
1916 	c_pte = &c_tbl->ct_dtbl[pte_idx];
1917 	if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1918 		/*
1919 		 * The PTE is currently valid.  This particular call
1920 		 * is just a synonym for one (or more) of the following
1921 		 * operations:
1922 		 *     change protection of a page
1923 		 *     change wiring status of a page
1924 		 *     remove the mapping of a page
1925 		 *
1926 		 * XXX - Semi critical: This code should unwire the PTE
1927 		 * and, possibly, associated parent tables if this is a
1928 		 * change wiring operation.  Currently it does not.
1929 		 *
1930 		 * This may be ok if pmap_unwire() is the only
1931 		 * interface used to UNWIRE a page.
1932 		 */
1933 
1934 		/* First check if this is a wiring operation. */
1935 		if (wired && (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)) {
1936 			/*
1937 			 * The PTE is already wired.  To prevent it from being
1938 			 * counted as a new wiring operation, reset the 'wired'
1939 			 * variable.
1940 			 */
1941 			wired = FALSE;
1942 		}
1943 
1944 		/* Is the new address the same as the old? */
1945 		if (MMU_PTE_PA(*c_pte) == pa) {
1946 			/*
1947 			 * Yes, mark that it does not need to be reinserted
1948 			 * into the PV list.
1949 			 */
1950 			insert = FALSE;
1951 
1952 			/*
1953 			 * Clear all but the modified, referenced and wired
1954 			 * bits on the PTE.
1955 			 */
1956 			c_pte->attr.raw &= (MMU_SHORT_PTE_M
1957 				| MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1958 		} else {
1959 			/* No, remove the old entry */
1960 			pmap_remove_pte(c_pte);
1961 			insert = TRUE;
1962 		}
1963 
1964 		/*
1965 		 * TLB flush is only necessary if modifying current map.
1966 		 * However, in pmap_enter(), the pmap almost always IS
1967 		 * the current pmap, so don't even bother to check.
1968 		 */
1969 		TBIS(va);
1970 	} else {
1971 		/*
1972 		 * The PTE is invalid.  Increment the valid entry count in
1973 		 * the C table manager to reflect the addition of a new entry.
1974 		 */
1975 		c_tbl->ct_ecnt++;
1976 
1977 		/* XXX - temporarily make sure the PTE is cleared. */
1978 		c_pte->attr.raw = 0;
1979 
1980 		/* It will also need to be inserted into the PV list. */
1981 		insert = TRUE;
1982 	}
1983 
1984 	/*
1985 	 * If page is changing from unwired to wired status, set an unused bit
1986 	 * within the PTE to indicate that it is wired.  Also increment the
1987 	 * wired entry count in the C table manager.
1988 	 */
1989 	if (wired) {
1990 		c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1991 		c_tbl->ct_wcnt++;
1992 	}
1993 
1994 	/*
1995 	 * Map the page, being careful to preserve modify/reference/wired
1996 	 * bits.  At this point it is assumed that the PTE either has no bits
1997 	 * set, or if there are set bits, they are only modified, reference or
1998 	 * wired bits.  If not, the following statement will cause erratic
1999 	 * behavior.
2000 	 */
2001 #ifdef	PMAP_DEBUG
2002 	if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
2003 		MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
2004 		printf("pmap_enter: junk left in PTE at %p\n", c_pte);
2005 		Debugger();
2006 	}
2007 #endif
2008 	c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
2009 
2010 	/*
2011 	 * If the mapping should be read-only, set the write protect
2012 	 * bit in the PTE.
2013 	 */
2014 	if (!(prot & VM_PROT_WRITE))
2015 		c_pte->attr.raw |= MMU_SHORT_PTE_WP;
2016 
2017 	/*
	 * If the mapping should be cache-inhibited (indicated by the flag
	 * bits found in the low-order bits of the physical address),
	 * mark the PTE as a cache-inhibited page.
2021 	 */
2022 	if (mapflags & PMAP_NC)
2023 		c_pte->attr.raw |= MMU_SHORT_PTE_CI;
2024 
2025 	/*
2026 	 * If the physical address being mapped is managed by the PV
2027 	 * system then link the pte into the list of pages mapped to that
2028 	 * address.
2029 	 */
2030 	if (insert && managed) {
2031 		pv = pa2pv(pa);
2032 		nidx = pteidx(c_pte);
2033 
2034 		s = splimp();
2035 		pvebase[nidx].pve_next = pv->pv_idx;
2036 		pv->pv_idx = nidx;
2037 		splx(s);
2038 	}
2039 
2040 	/* Move any allocated tables back into the active pool. */
2041 
2042 	switch (llevel) {
2043 		case NEWA:
2044 			TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2045 			/* FALLTHROUGH */
2046 		case NEWB:
2047 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2048 			/* FALLTHROUGH */
2049 		case NEWC:
2050 			TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2051 			/* FALLTHROUGH */
2052 		default:
2053 			break;
2054 	}
2055 
2056 	return (KERN_SUCCESS);
2057 }
2058 
2059 /* pmap_enter_kernel			INTERNAL
2060  **
2061  * Map the given virtual address to the given physical address within the
2062  * kernel address space.  This function exists because the kernel map does
2063  * not do dynamic table allocation.  It consists of a contiguous array of ptes
2064  * and can be edited directly without the need to walk through any tables.
2065  *
2066  * XXX: "Danger, Will Robinson!"
2067  * Note that the kernel should never take a fault on any page
2068  * between [ KERNBASE .. virtual_avail ] and this is checked in
2069  * trap.c for kernel-mode MMU faults.  This means that mappings
 * created in that range must be implicitly wired. -gwr
2071  */
2072 void
2073 pmap_enter_kernel(va, pa, prot)
2074 	vm_offset_t va;
2075 	vm_offset_t pa;
2076 	vm_prot_t   prot;
2077 {
2078 	boolean_t       was_valid, insert;
2079 	u_short         pte_idx;
2080 	int             s, flags;
2081 	mmu_short_pte_t *pte;
2082 	pv_t            *pv;
2083 	vm_offset_t     old_pa;
2084 
2085 	flags = (pa & ~MMU_PAGE_MASK);
2086 	pa &= MMU_PAGE_MASK;
2087 
2088 	if (is_managed(pa))
2089 		insert = TRUE;
2090 	else
2091 		insert = FALSE;
2092 
2093 	/*
2094 	 * Calculate the index of the PTE being modified.
2095 	 */
2096 	pte_idx = (u_long) m68k_btop(va - KERNBASE);
2097 
2098 	/* This array is traditionally named "Sysmap" */
2099 	pte = &kernCbase[pte_idx];
2100 
2101 	s = splimp();
2102 	if (MMU_VALID_DT(*pte)) {
2103 		was_valid = TRUE;
2104 		/*
2105 		 * If the PTE already maps a different
		 * physical address, unmap and pv_unlink.
2107 		 */
2108 		old_pa = MMU_PTE_PA(*pte);
2109 		if (pa != old_pa)
2110 			pmap_remove_pte(pte);
2111 		else {
2112 		    /*
2113 		     * Old PA and new PA are the same.  No need to
2114 		     * relink the mapping within the PV list.
2115 		     */
2116 		     insert = FALSE;
2117 
2118 		    /*
2119 		     * Save any mod/ref bits on the PTE.
2120 		     */
2121 		    pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2122 		}
2123 	} else {
2124 		pte->attr.raw = MMU_DT_INVALID;
2125 		was_valid = FALSE;
2126 	}
2127 
2128 	/*
	 * Map the page, being careful to preserve the modified/referenced bits
2130 	 * on the PTE.
2131 	 */
2132 	pte->attr.raw |= (pa | MMU_DT_PAGE);
2133 
2134 	if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2135 		pte->attr.raw |= MMU_SHORT_PTE_WP;
2136 	if (flags & PMAP_NC)
2137 		pte->attr.raw |= MMU_SHORT_PTE_CI;
2138 	if (was_valid)
2139 		TBIS(va);
2140 
2141 	/*
2142 	 * Insert the PTE into the PV system, if need be.
2143 	 */
2144 	if (insert) {
2145 		pv = pa2pv(pa);
2146 		pvebase[pte_idx].pve_next = pv->pv_idx;
2147 		pv->pv_idx = pte_idx;
2148 	}
2149 	splx(s);
2150 
2151 }
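
/*
 * Illustrative sketch (not compiled): because the kernel map is a single
 * flat array of PTEs ("Sysmap"), the PTE for a kernel VA is found with
 * plain arithmetic rather than a table walk.  The hypothetical helper
 * below is the lookup pmap_enter_kernel() performs inline above, and
 * which pmap_extract_kernel() and pmap_remove_kernel() repeat below.
 */
#if 0
mmu_short_pte_t *
kern_pte(va)
	vm_offset_t va;
{
	return &kernCbase[(u_long) m68k_btop(va - KERNBASE)];
}
#endif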
2152 
2153 void
2154 pmap_kenter_pa(va, pa, prot)
2155 	vaddr_t va;
2156 	paddr_t pa;
2157 	vm_prot_t prot;
2158 {
2159 	pmap_enter(pmap_kernel(), va, pa, prot, PMAP_WIRED);
2160 }
2161 
2162 void
2163 pmap_kenter_pgs(va, pgs, npgs)
2164 	vaddr_t va;
2165 	struct vm_page **pgs;
2166 	int npgs;
2167 {
2168 	int i;
2169 
2170 	for (i = 0; i < npgs; i++, va += PAGE_SIZE) {
2171 		pmap_enter(pmap_kernel(), va, VM_PAGE_TO_PHYS(pgs[i]),
2172 				VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
2173 	}
2174 }
2175 
2176 void
2177 pmap_kremove(va, len)
2178 	vaddr_t va;
2179 	vsize_t len;
2180 {
2181 	for (len >>= PAGE_SHIFT; len > 0; len--, va += PAGE_SIZE) {
2182 		pmap_remove(pmap_kernel(), va, va + PAGE_SIZE);
2183 	}
2184 }
2185 
2186 /* pmap_map			INTERNAL
2187  **
2188  * Map a contiguous range of physical memory into a contiguous range of
2189  * the kernel virtual address space.
2190  *
2191  * Used for device mappings and early mapping of the kernel text/data/bss.
2192  * Returns the first virtual address beyond the end of the range.
2193  */
2194 vm_offset_t
2195 pmap_map(va, pa, endpa, prot)
2196 	vm_offset_t	va;
2197 	vm_offset_t	pa;
2198 	vm_offset_t	endpa;
2199 	int		prot;
2200 {
2201 	int sz;
2202 
2203 	sz = endpa - pa;
2204 	do {
2205 		pmap_enter_kernel(va, pa, prot);
2206 		va += NBPG;
2207 		pa += NBPG;
2208 		sz -= NBPG;
2209 	} while (sz > 0);
2210 	return(va);
2211 }
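
/*
 * Illustrative note (not an actual call site): a device or early kernel
 * region could be mapped with something like
 *
 *	va = pmap_map(va, start_pa, end_pa, VM_PROT_READ|VM_PROT_WRITE);
 *
 * where 'start_pa' and 'end_pa' are hypothetical page-aligned physical
 * addresses; the return value is the first virtual address beyond the
 * mapped range, ready for the next mapping.
 */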
2212 
2213 /* pmap_protect			INTERFACE
2214  **
2215  * Apply the given protection to the given virtual address range within
2216  * the given map.
2217  *
2218  * It is ok for the protection applied to be stronger than what is
2219  * specified.  We use this to our advantage when the given map has no
2220  * mapping for the virtual address.  By skipping a page when this
2221  * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2222  * and therefore do not need to map the page just to apply a protection
2223  * code.  Only pmap_enter() needs to create new mappings if they do not exist.
2224  *
 * XXX - This function could be sped up by using pmap_stroll() for initial
2226  *       setup, and then manual scrolling in the for() loop.
2227  */
2228 void
2229 pmap_protect(pmap, startva, endva, prot)
2230 	pmap_t pmap;
2231 	vm_offset_t startva, endva;
2232 	vm_prot_t prot;
2233 {
2234 	boolean_t iscurpmap;
2235 	int a_idx, b_idx, c_idx;
2236 	a_tmgr_t *a_tbl;
2237 	b_tmgr_t *b_tbl;
2238 	c_tmgr_t *c_tbl;
2239 	mmu_short_pte_t *pte;
2240 
2241 	if (pmap == NULL)
2242 		return;
2243 	if (pmap == pmap_kernel()) {
2244 		pmap_protect_kernel(startva, endva, prot);
2245 		return;
2246 	}
2247 
2248 	/*
2249 	 * In this particular pmap implementation, there are only three
2250 	 * types of memory protection: 'all' (read/write/execute),
2251 	 * 'read-only' (read/execute) and 'none' (no mapping.)
2252 	 * It is not possible for us to treat 'executable' as a separate
2253 	 * protection type.  Therefore, protection requests that seek to
2254 	 * remove execute permission while retaining read or write, and those
2255 	 * that make little sense (write-only for example) are ignored.
2256 	 */
2257 	switch (prot) {
2258 		case VM_PROT_NONE:
2259 			/*
2260 			 * A request to apply the protection code of
2261 			 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2262 			 */
2263 			pmap_remove(pmap, startva, endva);
2264 			return;
2265 		case	VM_PROT_EXECUTE:
2266 		case	VM_PROT_READ:
2267 		case	VM_PROT_READ|VM_PROT_EXECUTE:
2268 			/* continue */
2269 			break;
2270 		case	VM_PROT_WRITE:
2271 		case	VM_PROT_WRITE|VM_PROT_READ:
2272 		case	VM_PROT_WRITE|VM_PROT_EXECUTE:
2273 		case	VM_PROT_ALL:
2274 			/* None of these should happen in a sane system. */
2275 			return;
2276 	}
2277 
2278 	/*
2279 	 * If the pmap has no A table, it has no mappings and therefore
2280 	 * there is nothing to protect.
2281 	 */
2282 	if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2283 		return;
2284 
2285 	a_idx = MMU_TIA(startva);
2286 	b_idx = MMU_TIB(startva);
2287 	c_idx = MMU_TIC(startva);
	b_tbl = NULL;
	c_tbl = NULL;
2289 
2290 	iscurpmap = (pmap == current_pmap());
2291 	while (startva < endva) {
2292 		if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2293 		  if (b_tbl == NULL) {
2294 		    b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2295 		    b_tbl = mmu_ptov((vm_offset_t) b_tbl);
2296 		    b_tbl = mmuB2tmgr((mmu_short_dte_t *) b_tbl);
2297 		  }
2298 		  if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2299 		    if (c_tbl == NULL) {
2300 		      c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2301 		      c_tbl = mmu_ptov((vm_offset_t) c_tbl);
2302 		      c_tbl = mmuC2tmgr((mmu_short_pte_t *) c_tbl);
2303 		    }
2304 		    if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2305 		      pte = &c_tbl->ct_dtbl[c_idx];
2306 		      /* make the mapping read-only */
2307 		      pte->attr.raw |= MMU_SHORT_PTE_WP;
2308 		      /*
2309 		       * If we just modified the current address space,
2310 		       * flush any translations for the modified page from
2311 		       * the translation cache and any data from it in the
2312 		       * data cache.
2313 		       */
2314 		      if (iscurpmap)
2315 		          TBIS(startva);
2316 		    }
2317 		    startva += NBPG;
2318 
2319 		    if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2320 		      c_tbl = NULL;
2321 		      c_idx = 0;
2322 		      if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2323 		        b_tbl = NULL;
2324 		        b_idx = 0;
2325 		      }
2326 		    }
2327 		  } else { /* C table wasn't valid */
2328 		    c_tbl = NULL;
2329 		    c_idx = 0;
2330 		    startva += MMU_TIB_RANGE;
2331 		    if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2332 		      b_tbl = NULL;
2333 		      b_idx = 0;
2334 		    }
2335 		  } /* C table */
2336 		} else { /* B table wasn't valid */
2337 		  b_tbl = NULL;
2338 		  b_idx = 0;
2339 		  startva += MMU_TIA_RANGE;
2340 		  a_idx++;
2341 		} /* B table */
2342 	}
2343 }
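
/*
 * Worked example for the loop above (sizes assume the 7/6/6/13 bit split
 * described at the top of this file): when the B-table entry covering
 * 'startva' is invalid, the loop advances 'startva' by MMU_TIB_RANGE,
 * i.e. one C table's worth of address space (64 pages of 8KB = 512KB),
 * without touching individual PTEs; an invalid A-table entry skips
 * MMU_TIA_RANGE (64 B entries = 32MB) at a time.
 */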
2344 
2345 /* pmap_protect_kernel			INTERNAL
2346  **
2347  * Apply the given protection code to a kernel address range.
2348  */
2349 void
2350 pmap_protect_kernel(startva, endva, prot)
2351 	vm_offset_t startva, endva;
2352 	vm_prot_t prot;
2353 {
2354 	vm_offset_t va;
2355 	mmu_short_pte_t *pte;
2356 
2357 	pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2358 	for (va = startva; va < endva; va += NBPG, pte++) {
2359 		if (MMU_VALID_DT(*pte)) {
2360 		    switch (prot) {
2361 		        case VM_PROT_ALL:
2362 		            break;
2363 		        case VM_PROT_EXECUTE:
2364 		        case VM_PROT_READ:
2365 		        case VM_PROT_READ|VM_PROT_EXECUTE:
2366 		            pte->attr.raw |= MMU_SHORT_PTE_WP;
2367 		            break;
2368 		        case VM_PROT_NONE:
2369 		            /* this is an alias for 'pmap_remove_kernel' */
2370 		            pmap_remove_pte(pte);
2371 		            break;
2372 		        default:
2373 		            break;
2374 		    }
2375 		    /*
2376 		     * since this is the kernel, immediately flush any cached
2377 		     * descriptors for this address.
2378 		     */
2379 		    TBIS(va);
2380 		}
2381 	}
2382 }
2383 
2384 /* pmap_unwire				INTERFACE
2385  **
2386  * Clear the wired attribute of the specified page.
2387  *
2388  * This function is called from vm_fault.c to unwire
2389  * a mapping.
2390  */
2391 void
2392 pmap_unwire(pmap, va)
2393 	pmap_t pmap;
2394 	vm_offset_t va;
2395 {
2396 	int a_idx, b_idx, c_idx;
2397 	a_tmgr_t *a_tbl;
2398 	b_tmgr_t *b_tbl;
2399 	c_tmgr_t *c_tbl;
2400 	mmu_short_pte_t *pte;
2401 
2402 	/* Kernel mappings always remain wired. */
2403 	if (pmap == pmap_kernel())
2404 		return;
2405 
2406 	/*
2407 	 * Walk through the tables.  If the walk terminates without
2408 	 * a valid PTE then the address wasn't wired in the first place.
2409 	 * Return immediately.
2410 	 */
2411 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2412 		&b_idx, &c_idx) == FALSE)
2413 		return;
2414 
2415 
2416 	/* Is the PTE wired?  If not, return. */
2417 	if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2418 		return;
2419 
2420 	/* Remove the wiring bit. */
2421 	pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2422 
2423 	/*
2424 	 * Decrement the wired entry count in the C table.
2425 	 * If it reaches zero the following things happen:
2426 	 * 1. The table no longer has any wired entries and is considered
2427 	 *    unwired.
2428 	 * 2. It is placed on the available queue.
2429 	 * 3. The parent table's wired entry count is decremented.
2430 	 * 4. If it reaches zero, this process repeats at step 1 and
	 *    stops after reaching the A table.
2432 	 */
2433 	if (--c_tbl->ct_wcnt == 0) {
2434 		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2435 		if (--b_tbl->bt_wcnt == 0) {
2436 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2437 			if (--a_tbl->at_wcnt == 0) {
2438 				TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2439 			}
2440 		}
2441 	}
2442 }
2443 
2444 /* pmap_copy				INTERFACE
2445  **
2446  * Copy the mappings of a range of addresses in one pmap, into
2447  * the destination address of another.
2448  *
2449  * This routine is advisory.  Should we one day decide that MMU tables
2450  * may be shared by more than one pmap, this function should be used to
2451  * link them together.  Until that day however, we do nothing.
2452  */
2453 void
2454 pmap_copy(pmap_a, pmap_b, dst, len, src)
2455 	pmap_t pmap_a, pmap_b;
2456 	vm_offset_t dst;
2457 	vm_size_t   len;
2458 	vm_offset_t src;
2459 {
2460 	/* not implemented. */
2461 }
2462 
2463 /* pmap_copy_page			INTERFACE
2464  **
2465  * Copy the contents of one physical page into another.
2466  *
2467  * This function makes use of two virtual pages allocated in pmap_bootstrap()
2468  * to map the two specified physical pages into the kernel address space.
2469  *
2470  * Note: We could use the transparent translation registers to make the
2471  * mappings.  If we do so, be sure to disable interrupts before using them.
2472  */
2473 void
2474 pmap_copy_page(srcpa, dstpa)
2475 	vm_offset_t srcpa, dstpa;
2476 {
2477 	vm_offset_t srcva, dstva;
2478 	int s;
2479 
2480 	srcva = tmp_vpages[0];
2481 	dstva = tmp_vpages[1];
2482 
2483 	s = splimp();
2484 	if (tmp_vpages_inuse++)
2485 		panic("pmap_copy_page: temporary vpages are in use.");
2486 
	/* Map pages as non-cacheable to avoid cache pollution? */
2488 	pmap_enter_kernel(srcva, srcpa, VM_PROT_READ);
2489 	pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2490 
2491 	/* Hand-optimized version of bcopy(src, dst, NBPG) */
2492 	copypage((char *) srcva, (char *) dstva);
2493 
2494 	pmap_remove_kernel(srcva, srcva + NBPG);
2495 	pmap_remove_kernel(dstva, dstva + NBPG);
2496 
2497 	--tmp_vpages_inuse;
2498 	splx(s);
2499 }
2500 
2501 /* pmap_zero_page			INTERFACE
2502  **
2503  * Zero the contents of the specified physical page.
2504  *
 * Uses one of the virtual pages allocated in pmap_bootstrap()
2506  * to map the specified page into the kernel address space.
2507  */
2508 void
2509 pmap_zero_page(dstpa)
2510 	vm_offset_t dstpa;
2511 {
2512 	vm_offset_t dstva;
2513 	int s;
2514 
2515 	dstva = tmp_vpages[1];
2516 	s = splimp();
2517 	if (tmp_vpages_inuse++)
2518 		panic("pmap_zero_page: temporary vpages are in use.");
2519 
2520 	/* The comments in pmap_copy_page() above apply here also. */
2521 	pmap_enter_kernel(dstva, dstpa, VM_PROT_READ|VM_PROT_WRITE);
2522 
2523 	/* Hand-optimized version of bzero(ptr, NBPG) */
2524 	zeropage((char *) dstva);
2525 
2526 	pmap_remove_kernel(dstva, dstva + NBPG);
2527 
2528 	--tmp_vpages_inuse;
2529 	splx(s);
2530 }
2531 
2532 /* pmap_collect			INTERFACE
2533  **
2534  * Called from the VM system when we are about to swap out
2535  * the process using this pmap.  This should give up any
2536  * resources held here, including all its MMU tables.
2537  */
2538 void
2539 pmap_collect(pmap)
2540 	pmap_t pmap;
2541 {
2542 	/* XXX - todo... */
2543 }
2544 
2545 /* pmap_create			INTERFACE
2546  **
2547  * Create and return a pmap structure.
2548  */
2549 pmap_t
2550 pmap_create()
2551 {
2552 	pmap_t	pmap;
2553 
2554 	pmap = (pmap_t) malloc(sizeof(struct pmap), M_VMPMAP, M_WAITOK);
2555 	pmap_pinit(pmap);
2556 	return pmap;
2557 }
2558 
2559 /* pmap_pinit			INTERNAL
2560  **
2561  * Initialize a pmap structure.
2562  */
2563 void
2564 pmap_pinit(pmap)
2565 	pmap_t pmap;
2566 {
2567 	bzero(pmap, sizeof(struct pmap));
2568 	pmap->pm_a_tmgr = NULL;
2569 	pmap->pm_a_phys = kernAphys;
2570 }
2571 
2572 /* pmap_release				INTERFACE
2573  **
2574  * Release any resources held by the given pmap.
2575  *
 * This is the reverse analog to pmap_pinit.  It does not
 * necessarily mean that the pmap structure will be deallocated,
 * as it is in pmap_destroy.
2579  */
2580 void
2581 pmap_release(pmap)
2582 	pmap_t pmap;
2583 {
2584 	/*
2585 	 * As long as the pmap contains no mappings,
2586 	 * which always should be the case whenever
2587 	 * this function is called, there really should
2588 	 * be nothing to do.
2589 	 */
2590 #ifdef	PMAP_DEBUG
2591 	if (pmap == NULL)
2592 		return;
2593 	if (pmap == pmap_kernel())
2594 		panic("pmap_release: kernel pmap");
2595 #endif
2596 	/*
2597 	 * XXX - If this pmap has an A table, give it back.
2598 	 * The pmap SHOULD be empty by now, and pmap_remove
2599 	 * should have already given back the A table...
2600 	 * However, I see:  pmap->pm_a_tmgr->at_ecnt == 1
2601 	 * at this point, which means some mapping was not
2602 	 * removed when it should have been. -gwr
2603 	 */
2604 	if (pmap->pm_a_tmgr != NULL) {
2605 		/* First make sure we are not using it! */
2606 		if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2607 			kernel_crp.rp_addr = kernAphys;
2608 			loadcrp(&kernel_crp);
2609 		}
2610 #ifdef	PMAP_DEBUG /* XXX - todo! */
2611 		/* XXX - Now complain... */
2612 		printf("pmap_release: still have table\n");
2613 		Debugger();
2614 #endif
2615 		free_a_table(pmap->pm_a_tmgr, TRUE);
2616 		pmap->pm_a_tmgr = NULL;
2617 		pmap->pm_a_phys = kernAphys;
2618 	}
2619 }
2620 
2621 /* pmap_reference			INTERFACE
2622  **
2623  * Increment the reference count of a pmap.
2624  */
2625 void
2626 pmap_reference(pmap)
2627 	pmap_t pmap;
2628 {
2629 	if (pmap == NULL)
2630 		return;
2631 
2632 	/* pmap_lock(pmap); */
2633 	pmap->pm_refcount++;
2634 	/* pmap_unlock(pmap); */
2635 }
2636 
2637 /* pmap_dereference			INTERNAL
2638  **
2639  * Decrease the reference count on the given pmap
2640  * by one and return the current count.
2641  */
2642 int
2643 pmap_dereference(pmap)
2644 	pmap_t pmap;
2645 {
2646 	int rtn;
2647 
2648 	if (pmap == NULL)
2649 		return 0;
2650 
2651 	/* pmap_lock(pmap); */
2652 	rtn = --pmap->pm_refcount;
2653 	/* pmap_unlock(pmap); */
2654 
2655 	return rtn;
2656 }
2657 
2658 /* pmap_destroy			INTERFACE
2659  **
2660  * Decrement a pmap's reference count and delete
2661  * the pmap if it becomes zero.  Will be called
2662  * only after all mappings have been removed.
2663  */
2664 void
2665 pmap_destroy(pmap)
2666 	pmap_t pmap;
2667 {
2668 	if (pmap == NULL)
2669 		return;
2670 	if (pmap == &kernel_pmap)
2671 		panic("pmap_destroy: kernel_pmap!");
2672 	if (pmap_dereference(pmap) == 0) {
2673 		pmap_release(pmap);
2674 		free(pmap, M_VMPMAP);
2675 	}
2676 }
2677 
2678 /* pmap_is_referenced			INTERFACE
2679  **
2680  * Determine if the given physical page has been
 * referenced (read from or written to).
2682  */
2683 boolean_t
2684 pmap_is_referenced(pg)
2685 	struct vm_page *pg;
2686 {
2687 	paddr_t   pa = VM_PAGE_TO_PHYS(pg);
2688 	pv_t      *pv;
2689 	int       idx, s;
2690 
2691 	if (!pv_initialized)
2692 		return FALSE;
	/* XXX - this may be unnecessary. */
2694 	if (!is_managed(pa))
2695 		return FALSE;
2696 
2697 	pv = pa2pv(pa);
2698 	/*
2699 	 * Check the flags on the pv head.  If they are set,
2700 	 * return immediately.  Otherwise a search must be done.
2701 	 */
2702 	if (pv->pv_flags & PV_FLAGS_USED)
2703 		return TRUE;
2704 
2705 	s = splimp();
2706 	/*
2707 	 * Search through all pv elements pointing
2708 	 * to this page and query their reference bits
2709 	 */
2710 	for (idx = pv->pv_idx;
2711 		 idx != PVE_EOL;
2712 		 idx = pvebase[idx].pve_next) {
2713 
2714 		if (MMU_PTE_USED(kernCbase[idx])) {
2715 			splx(s);
2716 			return TRUE;
2717 		}
2718 	}
2719 	splx(s);
2720 
2721 	return FALSE;
2722 }
2723 
2724 /* pmap_is_modified			INTERFACE
2725  **
2726  * Determine if the given physical page has been
2727  * modified (written to.)
2728  */
2729 boolean_t
2730 pmap_is_modified(pg)
2731 	struct vm_page *pg;
2732 {
2733 	paddr_t   pa = VM_PAGE_TO_PHYS(pg);
2734 	pv_t      *pv;
2735 	int       idx, s;
2736 
2737 	if (!pv_initialized)
2738 		return FALSE;
	/* XXX - this may be unnecessary. */
2740 	if (!is_managed(pa))
2741 		return FALSE;
2742 
2743 	/* see comments in pmap_is_referenced() */
2744 	pv = pa2pv(pa);
2745 	if (pv->pv_flags & PV_FLAGS_MDFY)
2746 		return TRUE;
2747 
2748 	s = splimp();
2749 	for (idx = pv->pv_idx;
2750 		 idx != PVE_EOL;
2751 		 idx = pvebase[idx].pve_next) {
2752 
2753 		if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2754 			splx(s);
2755 			return TRUE;
2756 		}
2757 	}
2758 	splx(s);
2759 
2760 	return FALSE;
2761 }
2762 
2763 /* pmap_page_protect			INTERFACE
2764  **
2765  * Applies the given protection to all mappings to the given
2766  * physical page.
2767  */
2768 void
2769 pmap_page_protect(pg, prot)
2770 	struct vm_page *pg;
2771 	vm_prot_t prot;
2772 {
2773 	paddr_t   pa = VM_PAGE_TO_PHYS(pg);
2774 	pv_t      *pv;
2775 	int       idx, s;
2776 	vm_offset_t va;
2777 	struct mmu_short_pte_struct *pte;
2778 	c_tmgr_t  *c_tbl;
2779 	pmap_t    pmap, curpmap;
2780 
2781 	if (!is_managed(pa))
2782 		return;
2783 
2784 	curpmap = current_pmap();
2785 	pv = pa2pv(pa);
2786 	s = splimp();
2787 
2788 	for (idx = pv->pv_idx;
2789 		 idx != PVE_EOL;
2790 		 idx = pvebase[idx].pve_next) {
2791 
2792 		pte = &kernCbase[idx];
2793 		switch (prot) {
2794 			case VM_PROT_ALL:
2795 				/* do nothing */
2796 				break;
2797 			case VM_PROT_EXECUTE:
2798 			case VM_PROT_READ:
2799 			case VM_PROT_READ|VM_PROT_EXECUTE:
2800 				/*
2801 				 * Determine the virtual address mapped by
2802 				 * the PTE and flush ATC entries if necessary.
2803 				 */
2804 				va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2805 				/* XXX don't write protect pager mappings */
2806 				if (va >= PAGER_SVA && va < PAGER_EVA) {
2807 #ifdef	PMAP_DEBUG
2808 					/* XXX - Does this actually happen? */
2809 					printf("pmap_page_protect: in pager!\n");
2810 					Debugger();
2811 #endif
2812 				} else
2813 					pte->attr.raw |= MMU_SHORT_PTE_WP;
2814 				if (pmap == curpmap || pmap == pmap_kernel())
2815 					TBIS(va);
2816 				break;
2817 			case VM_PROT_NONE:
2818 				/* Save the mod/ref bits. */
2819 				pv->pv_flags |= pte->attr.raw;
2820 				/* Invalidate the PTE. */
2821 				pte->attr.raw = MMU_DT_INVALID;
2822 
2823 				/*
2824 				 * Update table counts.  And flush ATC entries
2825 				 * if necessary.
2826 				 */
2827 				va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2828 
2829 				/*
2830 				 * If the PTE belongs to the kernel map,
2831 				 * be sure to flush the page it maps.
2832 				 */
2833 				if (pmap == pmap_kernel()) {
2834 					TBIS(va);
2835 				} else {
2836 					/*
2837 					 * The PTE belongs to a user map.
2838 					 * update the entry count in the C
2839 					 * table to which it belongs and flush
2840 					 * the ATC if the mapping belongs to
2841 					 * the current pmap.
2842 					 */
2843 					c_tbl->ct_ecnt--;
2844 					if (pmap == curpmap)
2845 						TBIS(va);
2846 				}
2847 				break;
2848 			default:
2849 				break;
2850 		}
2851 	}
2852 
2853 	/*
2854 	 * If the protection code indicates that all mappings to the page
2855 	 * be removed, truncate the PV list to zero entries.
2856 	 */
2857 	if (prot == VM_PROT_NONE)
2858 		pv->pv_idx = PVE_EOL;
2859 	splx(s);
2860 }
2861 
2862 /* pmap_get_pteinfo		INTERNAL
2863  **
2864  * Called internally to find the pmap and virtual address within that
2865  * map to which the pte at the given index maps.  Also includes the PTE's C
2866  * table manager.
2867  *
 * Returns the pmap and C table manager through the provided pointers,
 * and the virtual address as the return value.
2870  */
2871 vm_offset_t
2872 pmap_get_pteinfo(idx, pmap, tbl)
2873 	u_int idx;
2874 	pmap_t *pmap;
2875 	c_tmgr_t **tbl;
2876 {
2877 	vm_offset_t     va = 0;
2878 
2879 	/*
2880 	 * Determine if the PTE is a kernel PTE or a user PTE.
2881 	 */
2882 	if (idx >= NUM_KERN_PTES) {
2883 		/*
2884 		 * The PTE belongs to a user mapping.
2885 		 */
2886 		/* XXX: Would like an inline for this to validate idx... */
2887 		*tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2888 
2889 		*pmap = (*tbl)->ct_pmap;
2890 		/*
2891 		 * To find the va to which the PTE maps, we first take
2892 		 * the table's base virtual address mapping which is stored
2893 		 * in ct_va.  We then increment this address by a page for
2894 		 * every slot skipped until we reach the PTE.
2895 		 */
2896 		va =    (*tbl)->ct_va;
2897 		va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2898 	} else {
2899 		/*
2900 		 * The PTE belongs to the kernel map.
2901 		 */
2902 		*pmap = pmap_kernel();
2903 
2904 		va = m68k_ptob(idx);
2905 		va += KERNBASE;
2906 	}
2907 
2908 	return va;
2909 }
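
/*
 * Illustrative sketch (not compiled): the XXX above asks for an inline
 * that validates 'idx' before converting it into a C table manager.
 * A hypothetical version is shown below; 'num_user_ptes' stands in for
 * whatever count of user PTEs is available and is not a real symbol in
 * this file.
 */
#if 0
c_tmgr_t *
pteidx2ctmgr(idx)
	u_int idx;
{
	/* 'num_user_ptes' is a hypothetical bound on user PTE indices. */
	if (idx < NUM_KERN_PTES || idx >= NUM_KERN_PTES + num_user_ptes)
		panic("pteidx2ctmgr: bad PTE index");
	return &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
}
#endif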
2910 
2911 /* pmap_clear_modify			INTERFACE
2912  **
 * Clear the modification bit on the given physical page and return
 * an indication of whether it was previously set.
2916  */
2917 boolean_t
2918 pmap_clear_modify(pg)
2919 	struct vm_page *pg;
2920 {
2921 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2922 	boolean_t rv;
2923 
2924 	if (!is_managed(pa))
2925 		return FALSE;
2926 	rv = pmap_is_modified(pg);
2927 	pmap_clear_pv(pa, PV_FLAGS_MDFY);
2928 	return rv;
2929 }
2930 
2931 /* pmap_clear_reference			INTERFACE
2932  **
 * Clear the referenced bit on the given physical page and return
 * an indication of whether it was previously set.
2935  */
2936 boolean_t
2937 pmap_clear_reference(pg)
2938 	struct vm_page *pg;
2939 {
2940 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2941 	boolean_t rv;
2942 
2943 	if (!is_managed(pa))
2944 		return FALSE;
2945 	rv = pmap_is_referenced(pg);
2946 	pmap_clear_pv(pa, PV_FLAGS_USED);
2947 	return rv;
2948 }
2949 
2950 /* pmap_clear_pv			INTERNAL
2951  **
2952  * Clears the specified flag from the specified physical address.
2953  * (Used by pmap_clear_modify() and pmap_clear_reference().)
2954  *
2955  * Flag is one of:
2956  *   PV_FLAGS_MDFY - Page modified bit.
2957  *   PV_FLAGS_USED - Page used (referenced) bit.
2958  *
2959  * This routine must not only clear the flag on the pv list
2960  * head.  It must also clear the bit on every pte in the pv
2961  * list associated with the address.
2962  */
2963 void
2964 pmap_clear_pv(pa, flag)
2965 	vm_offset_t pa;
2966 	int flag;
2967 {
2968 	pv_t      *pv;
2969 	int       idx, s;
2970 	vm_offset_t     va;
2971 	pmap_t          pmap;
2972 	mmu_short_pte_t *pte;
2973 	c_tmgr_t        *c_tbl;
2974 
2975 	pv = pa2pv(pa);
2976 
2977 	s = splimp();
2978 	pv->pv_flags &= ~(flag);
2979 
2980 	for (idx = pv->pv_idx;
2981 		 idx != PVE_EOL;
2982 		 idx = pvebase[idx].pve_next) {
2983 
2984 		pte = &kernCbase[idx];
2985 		pte->attr.raw &= ~(flag);
2986 		/*
2987 		 * The MC68030 MMU will not set the modified or
2988 		 * referenced bits on any MMU tables for which it has
		 * a cached descriptor with its modify bit set.  To ensure
		 * that it will modify these bits on the PTE the next time
		 * it is written to or read from, we must flush it from
2992 		 * the ATC.
2993 		 *
2994 		 * Ordinarily it is only necessary to flush the descriptor
2995 		 * if it is used in the current address space.  But since I
2996 		 * am not sure that there will always be a notion of
2997 		 * 'the current address space' when this function is called,
2998 		 * I will skip the test and always flush the address.  It
2999 		 * does no harm.
3000 		 */
3001 		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3002 		TBIS(va);
3003 	}
3004 	splx(s);
3005 }
3006 
3007 /* pmap_extract			INTERFACE
3008  **
3009  * Return the physical address mapped by the virtual address
3010  * in the specified pmap.
3011  *
3012  * Note: this function should also apply an exclusive lock
 * on the pmap system for its duration.
3014  */
3015 boolean_t
3016 pmap_extract(pmap, va, pap)
3017 	pmap_t pmap;
3018 	vaddr_t va;
3019 	paddr_t *pap;
3020 {
3021 	int a_idx, b_idx, pte_idx;
3022 	a_tmgr_t	*a_tbl;
3023 	b_tmgr_t	*b_tbl;
3024 	c_tmgr_t	*c_tbl;
3025 	mmu_short_pte_t	*c_pte;
3026 
3027 	if (pmap == pmap_kernel())
3028 		return pmap_extract_kernel(va, pap);
3029 	if (pmap == NULL)
3030 		return FALSE;
3031 
3032 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
3033 		&c_pte, &a_idx, &b_idx, &pte_idx) == FALSE)
3034 		return FALSE;
3035 
3036 	if (!MMU_VALID_DT(*c_pte))
3037 		return FALSE;
3038 
3039 	if (pap != NULL)
3040 		*pap = MMU_PTE_PA(*c_pte);
3041 	return (TRUE);
3042 }
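
/*
 * Illustrative note (not an actual call site): a caller interested only
 * in the translation, not the tables, uses pmap_extract() like this:
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap, va, &pa))
 *		printf("va 0x%lx maps to pa 0x%lx\n", va, pa);
 *
 * The 'pap' argument may be NULL when only the existence of a mapping
 * is of interest.
 */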
3043 
3044 /* pmap_extract_kernel		INTERNAL
3045  **
3046  * Extract a translation from the kernel address space.
3047  */
3048 boolean_t
3049 pmap_extract_kernel(va, pap)
3050 	vaddr_t va;
3051 	paddr_t *pap;
3052 {
3053 	mmu_short_pte_t *pte;
3054 
3055 	pte = &kernCbase[(u_int) m68k_btop(va - KERNBASE)];
3056 	if (!MMU_VALID_DT(*pte))
3057 		return (FALSE);
3058 	if (pap != NULL)
3059 		*pap = MMU_PTE_PA(*pte);
3060 	return (TRUE);
3061 }
3062 
3063 /* pmap_remove_kernel		INTERNAL
3064  **
3065  * Remove the mapping of a range of virtual addresses from the kernel map.
3066  * The arguments are already page-aligned.
3067  */
3068 void
3069 pmap_remove_kernel(sva, eva)
3070 	vm_offset_t sva;
3071 	vm_offset_t eva;
3072 {
3073 	int idx, eidx;
3074 
3075 #ifdef	PMAP_DEBUG
3076 	if ((sva & PGOFSET) || (eva & PGOFSET))
3077 		panic("pmap_remove_kernel: alignment");
3078 #endif
3079 
3080 	idx  = m68k_btop(sva - KERNBASE);
3081 	eidx = m68k_btop(eva - KERNBASE);
3082 
3083 	while (idx < eidx) {
3084 		pmap_remove_pte(&kernCbase[idx++]);
3085 		TBIS(sva);
3086 		sva += NBPG;
3087 	}
3088 }
3089 
3090 /* pmap_remove			INTERFACE
3091  **
3092  * Remove the mapping of a range of virtual addresses from the given pmap.
3093  *
3094  * If the range contains any wired entries, this function will probably create
3095  * disaster.
3096  */
3097 void
3098 pmap_remove(pmap, start, end)
3099 	pmap_t pmap;
3100 	vm_offset_t start;
3101 	vm_offset_t end;
3102 {
3103 
3104 	if (pmap == pmap_kernel()) {
3105 		pmap_remove_kernel(start, end);
3106 		return;
3107 	}
3108 
3109 	/*
3110 	 * XXX - Temporary(?) statement to prevent panic caused
3111 	 * by vm_alloc_with_pager() handing us a software map (ie NULL)
3112 	 * to remove because it couldn't get backing store.
3113 	 * (I guess.)
3114 	 */
3115 	if (pmap == NULL)
3116 		return;
3117 
3118 	/*
3119 	 * If the pmap doesn't have an A table of its own, it has no mappings
3120 	 * that can be removed.
3121 	 */
3122 	if (pmap->pm_a_tmgr == NULL)
3123 		return;
3124 
3125 	/*
3126 	 * Remove the specified range from the pmap.  If the function
3127 	 * returns true, the operation removed all the valid mappings
3128 	 * in the pmap and freed its A table.  If this happened to the
3129 	 * currently loaded pmap, the MMU root pointer must be reloaded
3130 	 * with the default 'kernel' map.
3131 	 */
3132 	if (pmap_remove_a(pmap->pm_a_tmgr, start, end)) {
3133 		if (kernel_crp.rp_addr == pmap->pm_a_phys) {
3134 			kernel_crp.rp_addr = kernAphys;
3135 			loadcrp(&kernel_crp);
3136 			/* will do TLB flush below */
3137 		}
3138 		pmap->pm_a_tmgr = NULL;
3139 		pmap->pm_a_phys = kernAphys;
3140 	}
3141 
3142 	/*
3143 	 * If we just modified the current address space,
3144 	 * make sure to flush the MMU cache.
3145 	 *
	 * XXX - this could be an unnecessarily large flush.
3147 	 * XXX - Could decide, based on the size of the VA range
3148 	 * to be removed, whether to flush "by pages" or "all".
3149 	 */
3150 	if (pmap == current_pmap())
3151 		TBIAU();
3152 }
3153 
3154 /* pmap_remove_a			INTERNAL
3155  **
3156  * This is function number one in a set of three that removes a range
3157  * of memory in the most efficient manner by removing the highest possible
3158  * tables from the memory space.  This particular function attempts to remove
3159  * as many B tables as it can, delegating the remaining fragmented ranges to
3160  * pmap_remove_b().
3161  *
3162  * If the removal operation results in an empty A table, the function returns
3163  * TRUE.
3164  *
3165  * It's ugly but will do for now.
3166  */
3167 boolean_t
3168 pmap_remove_a(a_tbl, start, end)
3169 	a_tmgr_t *a_tbl;
3170 	vm_offset_t start;
3171 	vm_offset_t end;
3172 {
3173 	boolean_t empty;
3174 	int idx;
3175 	vm_offset_t nstart, nend;
3176 	b_tmgr_t *b_tbl;
3177 	mmu_long_dte_t  *a_dte;
3178 	mmu_short_dte_t *b_dte;
3179 
3180 	/*
3181 	 * The following code works with what I call a 'granularity
	 * reduction algorithm'.  A range of addresses will always have
3183 	 * the following properties, which are classified according to
3184 	 * how the range relates to the size of the current granularity
3185 	 * - an A table entry:
3186 	 *
3187 	 *            1 2       3 4
3188 	 * -+---+---+---+---+---+---+---+-
3189 	 * -+---+---+---+---+---+---+---+-
3190 	 *
3191 	 * A range will always start on a granularity boundary, illustrated
3192 	 * by '+' signs in the table above, or it will start at some point
	 * in between granularity boundaries, as illustrated by point 1.
3194 	 * The first step in removing a range of addresses is to remove the
3195 	 * range between 1 and 2, the nearest granularity boundary.  This
3196 	 * job is handled by the section of code governed by the
3197 	 * 'if (start < nstart)' statement.
3198 	 *
	 * A range will always encompass zero or more integral granules,
3200 	 * illustrated by points 2 and 3.  Integral granules are easy to
3201 	 * remove.  The removal of these granules is the second step, and
3202 	 * is handled by the code block 'if (nstart < nend)'.
3203 	 *
	 * Lastly, a range will always end on a granularity boundary,
	 * illustrated by point 3, or it will fall just beyond one, as
	 * illustrated by point 4.  The last step involves removing this
	 * range and is handled by the code block 'if (nend < end)'.
3208 	 */
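	/*
	 * Worked example (sizes assume the 32MB A-table granularity implied
	 * by the 7/6/6/13 bit split described at the top of this file):
	 * removing [0x01200000, 0x06400000) gives nstart = 0x02000000 and
	 * nend = 0x06000000.  The first block below hands [0x01200000,
	 * 0x02000000) to pmap_remove_b(), the second frees the whole B
	 * tables covering [0x02000000, 0x06000000), and the third hands
	 * [0x06000000, 0x06400000) to pmap_remove_b().
	 */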
3209 	nstart = MMU_ROUND_UP_A(start);
3210 	nend = MMU_ROUND_A(end);
3211 
3212 	if (start < nstart) {
3213 		/*
3214 		 * This block is executed if the range starts between
3215 		 * a granularity boundary.
3216 		 *
3217 		 * First find the DTE which is responsible for mapping
3218 		 * the start of the range.
3219 		 */
3220 		idx = MMU_TIA(start);
3221 		a_dte = &a_tbl->at_dtbl[idx];
3222 
3223 		/*
3224 		 * If the DTE is valid then delegate the removal of the sub
3225 		 * range to pmap_remove_b(), which can remove addresses at
3226 		 * a finer granularity.
3227 		 */
3228 		if (MMU_VALID_DT(*a_dte)) {
3229 			b_dte = mmu_ptov(a_dte->addr.raw);
3230 			b_tbl = mmuB2tmgr(b_dte);
3231 
3232 			/*
3233 			 * The sub range to be removed starts at the start
			 * of the full range we were asked to remove, and ends
			 * at the lesser of:
			 * 1. The end of the full range, -or-
			 * 2. The start of the full range, rounded up to the
			 *    nearest granularity boundary.
3239 			 */
3240 			if (end < nstart)
3241 				empty = pmap_remove_b(b_tbl, start, end);
3242 			else
3243 				empty = pmap_remove_b(b_tbl, start, nstart);
3244 
3245 			/*
3246 			 * If the removal resulted in an empty B table,
3247 			 * invalidate the DTE that points to it and decrement
3248 			 * the valid entry count of the A table.
3249 			 */
3250 			if (empty) {
3251 				a_dte->attr.raw = MMU_DT_INVALID;
3252 				a_tbl->at_ecnt--;
3253 			}
3254 		}
3255 		/*
3256 		 * If the DTE is invalid, the address range is already non-
		 * existent and can simply be skipped.
3258 		 */
3259 	}
3260 	if (nstart < nend) {
3261 		/*
3262 		 * This block is executed if the range spans a whole number
3263 		 * multiple of granules (A table entries.)
3264 		 *
3265 		 * First find the DTE which is responsible for mapping
3266 		 * the start of the first granule involved.
3267 		 */
3268 		idx = MMU_TIA(nstart);
3269 		a_dte = &a_tbl->at_dtbl[idx];
3270 
3271 		/*
3272 		 * Remove entire sub-granules (B tables) one at a time,
3273 		 * until reaching the end of the range.
3274 		 */
3275 		for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3276 			if (MMU_VALID_DT(*a_dte)) {
3277 				/*
3278 				 * Find the B table manager for the
3279 				 * entry and free it.
3280 				 */
3281 				b_dte = mmu_ptov(a_dte->addr.raw);
3282 				b_tbl = mmuB2tmgr(b_dte);
3283 				free_b_table(b_tbl, TRUE);
3284 
3285 				/*
3286 				 * Invalidate the DTE that points to the
3287 				 * B table and decrement the valid entry
3288 				 * count of the A table.
3289 				 */
3290 				a_dte->attr.raw = MMU_DT_INVALID;
3291 				a_tbl->at_ecnt--;
3292 			}
3293 	}
3294 	if (nend < end) {
3295 		/*
3296 		 * This block is executed if the range ends beyond a
3297 		 * granularity boundary.
3298 		 *
3299 		 * First find the DTE which is responsible for mapping
3300 		 * the start of the nearest (rounded down) granularity
3301 		 * boundary.
3302 		 */
3303 		idx = MMU_TIA(nend);
3304 		a_dte = &a_tbl->at_dtbl[idx];
3305 
3306 		/*
3307 		 * If the DTE is valid then delegate the removal of the sub
3308 		 * range to pmap_remove_b(), which can remove addresses at
3309 		 * a finer granularity.
3310 		 */
3311 		if (MMU_VALID_DT(*a_dte)) {
3312 			/*
3313 			 * Find the B table manager for the entry
3314 			 * and hand it to pmap_remove_b() along with
3315 			 * the sub range.
3316 			 */
3317 			b_dte = mmu_ptov(a_dte->addr.raw);
3318 			b_tbl = mmuB2tmgr(b_dte);
3319 
3320 			empty = pmap_remove_b(b_tbl, nend, end);
3321 
3322 			/*
3323 			 * If the removal resulted in an empty B table,
3324 			 * invalidate the DTE that points to it and decrement
3325 			 * the valid entry count of the A table.
3326 			 */
3327 			if (empty) {
3328 				a_dte->attr.raw = MMU_DT_INVALID;
3329 				a_tbl->at_ecnt--;
3330 			}
3331 		}
3332 	}
3333 
3334 	/*
3335 	 * If there are no more entries in the A table, release it
3336 	 * back to the available pool and return TRUE.
3337 	 */
3338 	if (a_tbl->at_ecnt == 0) {
3339 		a_tbl->at_parent = NULL;
3340 		TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3341 		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3342 		empty = TRUE;
3343 	} else {
3344 		empty = FALSE;
3345 	}
3346 
3347 	return empty;
3348 }
3349 
3350 /* pmap_remove_b			INTERNAL
3351  **
3352  * Remove a range of addresses from an address space, trying to remove entire
3353  * C tables if possible.
3354  *
3355  * If the operation results in an empty B table, the function returns TRUE.
3356  */
3357 boolean_t
3358 pmap_remove_b(b_tbl, start, end)
3359 	b_tmgr_t *b_tbl;
3360 	vm_offset_t start;
3361 	vm_offset_t end;
3362 {
3363 	boolean_t empty;
3364 	int idx;
3365 	vm_offset_t nstart, nend, rstart;
3366 	c_tmgr_t *c_tbl;
3367 	mmu_short_dte_t  *b_dte;
3368 	mmu_short_pte_t  *c_dte;
3369 
3370 
3371 	nstart = MMU_ROUND_UP_B(start);
3372 	nend = MMU_ROUND_B(end);
3373 
3374 	if (start < nstart) {
3375 		idx = MMU_TIB(start);
3376 		b_dte = &b_tbl->bt_dtbl[idx];
3377 		if (MMU_VALID_DT(*b_dte)) {
3378 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3379 			c_tbl = mmuC2tmgr(c_dte);
3380 			if (end < nstart)
3381 				empty = pmap_remove_c(c_tbl, start, end);
3382 			else
3383 				empty = pmap_remove_c(c_tbl, start, nstart);
3384 			if (empty) {
3385 				b_dte->attr.raw = MMU_DT_INVALID;
3386 				b_tbl->bt_ecnt--;
3387 			}
3388 		}
3389 	}
3390 	if (nstart < nend) {
3391 		idx = MMU_TIB(nstart);
3392 		b_dte = &b_tbl->bt_dtbl[idx];
3393 		rstart = nstart;
3394 		while (rstart < nend) {
3395 			if (MMU_VALID_DT(*b_dte)) {
3396 				c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3397 				c_tbl = mmuC2tmgr(c_dte);
3398 				free_c_table(c_tbl, TRUE);
3399 				b_dte->attr.raw = MMU_DT_INVALID;
3400 				b_tbl->bt_ecnt--;
3401 			}
3402 			b_dte++;
3403 			rstart += MMU_TIB_RANGE;
3404 		}
3405 	}
3406 	if (nend < end) {
3407 		idx = MMU_TIB(nend);
3408 		b_dte = &b_tbl->bt_dtbl[idx];
3409 		if (MMU_VALID_DT(*b_dte)) {
3410 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3411 			c_tbl = mmuC2tmgr(c_dte);
3412 			empty = pmap_remove_c(c_tbl, nend, end);
3413 			if (empty) {
3414 				b_dte->attr.raw = MMU_DT_INVALID;
3415 				b_tbl->bt_ecnt--;
3416 			}
3417 		}
3418 	}
3419 
3420 	if (b_tbl->bt_ecnt == 0) {
3421 		b_tbl->bt_parent = NULL;
3422 		TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3423 		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3424 		empty = TRUE;
3425 	} else {
3426 		empty = FALSE;
3427 	}
3428 
3429 	return empty;
3430 }
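
/*
 * Worked example for the boundary splitting above (a sketch only; it
 * assumes the 7/6/6/13 bit split described in the header comment,
 * i.e. 8KB pages and 512KB of VA per C table):  removing the range
 * [0x123000, 0x29f000) from a B table proceeds as
 *
 *	nstart = MMU_ROUND_UP_B(0x123000) = 0x180000
 *	nend   = MMU_ROUND_B(0x29f000)    = 0x280000
 *
 *	head   [0x123000, 0x180000) -> pmap_remove_c() on one C table
 *	middle [0x180000, 0x280000) -> two whole C tables, free_c_table()
 *	tail   [0x280000, 0x29f000) -> pmap_remove_c() on one C table
 */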
3431 
3432 /* pmap_remove_c			INTERNAL
3433  **
3434  * Remove a range of addresses from the given C table.
3435  */
3436 boolean_t
3437 pmap_remove_c(c_tbl, start, end)
3438 	c_tmgr_t *c_tbl;
3439 	vm_offset_t start;
3440 	vm_offset_t end;
3441 {
3442 	boolean_t empty;
3443 	int idx;
3444 	mmu_short_pte_t *c_pte;
3445 
3446 	idx = MMU_TIC(start);
3447 	c_pte = &c_tbl->ct_dtbl[idx];
3448 	for (;start < end; start += MMU_PAGE_SIZE, c_pte++) {
3449 		if (MMU_VALID_DT(*c_pte)) {
3450 			pmap_remove_pte(c_pte);
3451 			c_tbl->ct_ecnt--;
3452 		}
3453 	}
3454 
3455 	if (c_tbl->ct_ecnt == 0) {
3456 		c_tbl->ct_parent = NULL;
3457 		TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3458 		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3459 		empty = TRUE;
3460 	} else {
3461 		empty = FALSE;
3462 	}
3463 
3464 	return empty;
3465 }
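
/*
 * The empty-table handling in pmap_remove_a/b/c above relies on the
 * queue(3) TAILQ macros to move a drained table manager to the head of
 * its free pool, so it is the first one reused.  A minimal, standalone
 * sketch of the same idiom (the 'foo' type and pool are hypothetical,
 * not part of this pmap):
 *
 *	struct foo { TAILQ_ENTRY(foo) f_link; int f_ecnt; };
 *	TAILQ_HEAD(, foo) foo_pool = TAILQ_HEAD_INITIALIZER(foo_pool);
 *
 *	if (fp->f_ecnt == 0) {
 *		TAILQ_REMOVE(&foo_pool, fp, f_link);
 *		TAILQ_INSERT_HEAD(&foo_pool, fp, f_link);
 *	}
 */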
3466 
3467 /* is_managed				INTERNAL
3468  **
3469  * Determine if the given physical address is managed by the PV system.
3470  * Note that this logic assumes that no one will ask for the status of
3471  * addresses which lie in-between the memory banks on the 3/80.  If they
3472  * do so, it will falsely report that the address is managed.
3473  *
3474  * Note: A "managed" address is one that was reported to the VM system as
3475  * a "usable page" during system startup.  As such, the VM system expects the
3476  * pmap module to keep accurate track of the usage of those pages.
3477  * Any page not given to the VM system at startup does not exist (as far as
3478  * the VM system is concerned) and is therefore "unmanaged."  Examples are
3479  * those pages which belong to the ROM monitor and the memory allocated before
3480  * the VM system was started.
3481  */
3482 boolean_t
3483 is_managed(pa)
3484 	vm_offset_t pa;
3485 {
3486 	if (pa >= avail_start && pa < avail_end)
3487 		return TRUE;
3488 	else
3489 		return FALSE;
3490 }
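
/*
 * Typical use of the check above (a sketch, not a quote of any one
 * caller): only managed pages have PV list entries, so callers test
 * the physical address before touching the PV system, e.g.
 *
 *	if (is_managed(pa)) {
 *		pv = pa2pv(pa);
 *		(... update the PV list ...)
 *	}
 */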
3491 
3492 /* pmap_bootstrap_alloc			INTERNAL
3493  **
3494  * Used internally for memory allocation at startup when malloc is not
3495  * available.  This code will fail once it crosses the first memory
3496  * bank boundary on the 3/80.  Hopefully, by then, the VM system
3497  * will be in charge of allocation.
3498  */
3499 void *
3500 pmap_bootstrap_alloc(size)
3501 	int size;
3502 {
3503 	void *rtn;
3504 
3505 #ifdef	PMAP_DEBUG
3506 	if (bootstrap_alloc_enabled == FALSE) {
3507 		mon_printf("pmap_bootstrap_alloc: disabled\n");
3508 		sunmon_abort();
3509 	}
3510 #endif
3511 
3512 	rtn = (void *) virtual_avail;
3513 	virtual_avail += size;
3514 
3515 #ifdef	PMAP_DEBUG
3516 	if (virtual_avail > virtual_contig_end) {
3517 		mon_printf("pmap_bootstrap_alloc: out of mem\n");
3518 		sunmon_abort();
3519 	}
3520 #endif
3521 
3522 	return rtn;
3523 }
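
/*
 * The allocator above is a plain bump pointer over the boot-time
 * contiguous region.  By way of example (the numbers are made up):
 * with virtual_avail at 0xfe041000, pmap_bootstrap_alloc(0x200)
 * returns 0xfe041000 and advances virtual_avail to 0xfe041200.
 * Nothing is ever freed; the region is simply handed out in order.
 */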
3524 
3525 /* pmap_bootstrap_aalign			INTERNAL
3526  **
3527  * Used to ensure that the next call to pmap_bootstrap_alloc() will
3528  * return a chunk of memory aligned to the specified size.
3529  *
3530  * Note: This function will only support alignment sizes that are powers
3531  * of two.
3532  */
3533 void
3534 pmap_bootstrap_aalign(size)
3535 	int size;
3536 {
3537 	int off;
3538 
3539 	off = virtual_avail & (size - 1);
3540 	if (off) {
3541 		(void) pmap_bootstrap_alloc(size - off);
3542 	}
3543 }
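
/*
 * The mask arithmetic above only works because 'size' is a power of
 * two.  A worked example (the numbers are made up): with virtual_avail
 * at 0xfe041234 and size 0x1000,
 *
 *	off = 0xfe041234 & 0x0fff = 0x234
 *	pmap_bootstrap_alloc(0x1000 - 0x234)	discards 0xdcc pad bytes
 *
 * which leaves virtual_avail at 0xfe042000, so the next allocation
 * starts on a 0x1000-byte boundary.
 */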
3544 
3545 /* pmap_pa_exists
3546  **
3547  * Used by the /dev/mem driver to see if a given PA is memory
3548  * that can be mapped.  (The PA is not in a hole.)
3549  */
3550 int
3551 pmap_pa_exists(pa)
3552 	vm_offset_t pa;
3553 {
3554 	register int i;
3555 
3556 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3557 		if ((pa >= avail_mem[i].pmem_start) &&
3558 			(pa <  avail_mem[i].pmem_end))
3559 			return (1);
3560 		if (avail_mem[i].pmem_next == NULL)
3561 			break;
3562 	}
3563 	return (0);
3564 }
3565 
3566 /* Called only from locore.s and pmap.c */
3567 void	_pmap_switch __P((pmap_t pmap));
3568 
3569 /*
3570  * _pmap_switch			INTERNAL
3571  *
3572  * This is called by locore.s:cpu_switch() when it is
3573  * switching to a new process.  Load new translations.
3574  * Note: done in-line by locore.s unless PMAP_DEBUG
3575  *
3576  * Note that we do NOT allocate a context here, but
3577  * share the "kernel only" context until we really
3578  * need our own context for user-space mappings in
3579  * pmap_enter_user().  [ s/context/mmu A table/ ]
3580  */
3581 void
3582 _pmap_switch(pmap)
3583 	pmap_t pmap;
3584 {
3585 	u_long rootpa;
3586 
3587 	/*
3588 	 * Only do reload/flush if we have to.
3589 	 * Note that if the old and new process
3590 	 * were BOTH using the "null" context,
3591 	 * then this will NOT flush the TLB.
3592 	 */
3593 	rootpa = pmap->pm_a_phys;
3594 	if (kernel_crp.rp_addr != rootpa) {
3595 		DPRINT(("pmap_activate(%p)\n", pmap));
3596 		kernel_crp.rp_addr = rootpa;
3597 		loadcrp(&kernel_crp);
3598 		TBIAU();
3599 	}
3600 }
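
/*
 * Example of the short-circuit above (a sketch): when neither the old
 * nor the new process has allocated its own A table yet, both run on
 * the shared kernel root, so
 *
 *	kernel_crp.rp_addr == pmap->pm_a_phys
 *
 * already holds and the switch skips both loadcrp() and the TBIAU()
 * flush, leaving the ATC contents intact.
 */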
3601 
3602 /*
3603  * Exported version of pmap_activate().  This is called from the
3604  * machine-independent VM code when a process is given a new pmap.
3605  * If (p == curproc) do like cpu_switch would do; otherwise just
3606  * take this as notification that the process has a new pmap.
3607  */
3608 void
3609 pmap_activate(p)
3610 	struct proc *p;
3611 {
3612 	pmap_t pmap = p->p_vmspace->vm_map.pmap;
3613 	int s;
3614 
3615 	if (p == curproc) {
3616 		s = splimp();
3617 		_pmap_switch(pmap);
3618 		splx(s);
3619 	}
3620 }
3621 
3622 /*
3623  * pmap_deactivate			INTERFACE
3624  **
3625  * This is called to deactivate the specified process's address space.
3626  * XXX The semantics of this function are currently not well-defined.
3627  */
3628 void
3629 pmap_deactivate(p)
3630 struct proc *p;
3631 {
3632 	/* not implemented. */
3633 }
3634 
3635 /* pmap_update
3636  **
3637  * Apply any delayed changes scheduled for all pmaps immediately.
3638  *
3639  * No delayed operations are currently done in this pmap.
3640  */
3641 void
3642 pmap_update()
3643 {
3644 	/* not implemented. */
3645 }
3646 
3647 /*
3648  * Fill in the sun3x-specific part of the kernel core header
3649  * for dumpsys().  (See machdep.c for the rest.)
3650  */
3651 void
3652 pmap_kcore_hdr(sh)
3653 	struct sun3x_kcore_hdr *sh;
3654 {
3655 	u_long spa, len;
3656 	int i;
3657 
3658 	sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
3659 	sh->pg_valid = MMU_DT_PAGE;
3660 	sh->contig_end = virtual_contig_end;
3661 	sh->kernCbase = (u_long) kernCbase;
3662 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3663 		spa = avail_mem[i].pmem_start;
3664 		spa = m68k_trunc_page(spa);
3665 		len = avail_mem[i].pmem_end - spa;
3666 		len = m68k_round_page(len);
3667 		sh->ram_segs[i].start = spa;
3668 		sh->ram_segs[i].size  = len;
3669 	}
3670 }
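
/*
 * Rounding example for the loop above (hypothetical bank, assuming the
 * 8KB page size implied by the header comment): a bank reported as
 * [0x1f000, 0x3fd800) is recorded as
 *
 *	start = m68k_trunc_page(0x1f000)            = 0x1e000
 *	size  = m68k_round_page(0x3fd800 - 0x1e000) = 0x3e0000
 *
 * so the dump segment covers whole pages at both ends.
 */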
3671 
3672 
3673 /* pmap_virtual_space			INTERFACE
3674  **
3675  * Return the currently available range of virtual addresses in the
3676  * arguments provided.  Only really called once.
3677  */
3678 void
3679 pmap_virtual_space(vstart, vend)
3680 	vm_offset_t *vstart, *vend;
3681 {
3682 	*vstart = virtual_avail;
3683 	*vend = virtual_end;
3684 }
3685 
3686 /*
3687  * Provide memory to the VM system.
3688  *
3689  * Assume avail_start is always in the
3690  * first segment as pmap_bootstrap does.
3691  */
3692 static void
3693 pmap_page_upload()
3694 {
3695 	vm_offset_t	a, b;	/* memory range */
3696 	int i;
3697 
3698 	/* Supply the memory in segments. */
3699 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3700 		a = atop(avail_mem[i].pmem_start);
3701 		b = atop(avail_mem[i].pmem_end);
3702 		if (i == 0)
3703 			a = atop(avail_start);
3704 
3705 		uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
3706 
3707 		if (avail_mem[i].pmem_next == NULL)
3708 			break;
3709 	}
3710 }
3711 
3712 /* pmap_page_index			INTERFACE
3713  **
3714  * Return the index of the given physical page in a list of usable
3715  * physical pages in the system.  Holes in physical memory may be counted
3716  * if so desired.  As long as pmap_free_pages() and pmap_page_index()
3717  * agree as to whether holes in memory do or do not count as valid pages,
3718  * it really doesn't matter.  However, if you like to save a little
3719  * memory, don't count holes as valid pages.  This is even more true when
3720  * the holes are large.
3721  *
3722  * We will not count holes as valid pages.  We can generate page indices
3723  * that conform to this by using the memory bank structures initialized
3724  * in pmap_alloc_pv().
3725  */
3726 int
3727 pmap_page_index(pa)
3728 	vm_offset_t pa;
3729 {
3730 	struct pmap_physmem_struct *bank = avail_mem;
3731 	vm_offset_t off;
3732 
3733 	/* Search for the memory bank with this page. */
3734 	/* XXX - What if it is not physical memory? */
3735 	while (pa > bank->pmem_end)
3736 		bank = bank->pmem_next;
3737 	off = pa - bank->pmem_start;
3738 
3739 	return (bank->pmem_pvbase + m68k_btop(off));
3740 }
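
/*
 * Worked example (the bank layout is made up, and the 8KB page size is
 * the one implied by the header comment): given two banks
 *
 *	bank 0: pmem_start 0x00000000, pmem_end 0x00400000, pvbase   0
 *	bank 1: pmem_start 0x10000000, pmem_end 0x10200000, pvbase 512
 *
 * pmap_page_index(0x10004000) steps past bank 0, computes
 * off = 0x4000, and returns 512 + 2 = 514.  The hole between the
 * banks consumes no indices.
 */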
3741 
3742 /* pmap_count			INTERFACE
3743  **
3744  * Return the number of resident (valid) pages in the given pmap.
3745  *
3746  * Note:  If this function is handed the kernel map, it will report
3747  * that it has no mappings.  Hopefully the VM system won't ask for kernel
3748  * map statistics.
3749  */
3750 segsz_t
3751 pmap_count(pmap, type)
3752 	pmap_t pmap;
3753 	int    type;
3754 {
3755 	u_int     count;
3756 	int       a_idx, b_idx;
3757 	a_tmgr_t *a_tbl;
3758 	b_tmgr_t *b_tbl;
3759 	c_tmgr_t *c_tbl;
3760 
3761 	/*
3762 	 * If the pmap does not have its own A table manager, it has no
3763  * valid entries.
3764 	 */
3765 	if (pmap->pm_a_tmgr == NULL)
3766 		return 0;
3767 
3768 	a_tbl = pmap->pm_a_tmgr;
3769 
3770 	count = 0;
3771 	for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3772 	    if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3773 	        b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3774 	        for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3775 	            if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3776 	                c_tbl = mmuC2tmgr(
3777 	                    mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3778 	                if (type == 0)
3779 	                    /*
3780 	                     * A resident entry count has been requested.
3781 	                     */
3782 	                    count += c_tbl->ct_ecnt;
3783 	                else
3784 	                    /*
3785 	                     * A wired entry count has been requested.
3786 	                     */
3787 	                    count += c_tbl->ct_wcnt;
3788 	            }
3789 	        }
3790 	    }
3791 	}
3792 
3793 	return count;
3794 }
3795 
3796 /************************ SUN3 COMPATIBILITY ROUTINES ********************
3797  * The following routines are only used by DDB for tricky kernel         *
3798  * text operations in db_memrw.c.  They are provided for sun3            *
3799  * compatibility.                                                        *
3800  *************************************************************************/
3801 /* get_pte			INTERNAL
3802  **
3803  * Return the page descriptor that describes the kernel mapping
3804  * of the given virtual address.
3805  */
3806 extern u_long ptest_addr __P((u_long));	/* XXX: locore.s */
3807 u_int
3808 get_pte(va)
3809 	vm_offset_t va;
3810 {
3811 	u_long pte_pa;
3812 	mmu_short_pte_t *pte;
3813 
3814 	/* Get the physical address of the PTE */
3815 	pte_pa = ptest_addr(va & ~PGOFSET);
3816 
3817 	/* Convert to a virtual address... */
3818 	pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
3819 
3820 	/* Make sure it is in our level-C tables... */
3821 	if ((pte < kernCbase) ||
3822 		(pte >= &mmuCbase[NUM_USER_PTES]))
3823 		return 0;
3824 
3825 	/* ... and just return its contents. */
3826 	return (pte->attr.raw);
3827 }
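
/*
 * Example of the conversion above (the address is made up): if the
 * MMU's ptest operation reports the kernel descriptor for 'va' at
 * physical address 0x00345678, it is read through the KERNBASE+pa
 * window as
 *
 *	pte = (mmu_short_pte_t *) (KERNBASE + 0x00345678);
 *
 * and pte->attr.raw is returned, provided the descriptor lies within
 * the managed level-C tables.
 */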
3828 
3829 
3830 /* set_pte			INTERNAL
3831  **
3832  * Set the page descriptor that describes the kernel mapping
3833  * of the given virtual address.
3834  */
3835 void
3836 set_pte(va, pte)
3837 	vm_offset_t va;
3838 	u_int pte;
3839 {
3840 	u_long idx;
3841 
3842 	if (va < KERNBASE)
3843 		return;
3844 
3845 	idx = (unsigned long) m68k_btop(va - KERNBASE);
3846 	kernCbase[idx].attr.raw = pte;
3847 	TBIS(va);
3848 }
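
/*
 * Index arithmetic above, by example (assuming the 8KB page size
 * implied by the header comment): for va = KERNBASE + 0x6000,
 *
 *	idx = m68k_btop(0x6000) = 3
 *
 * so kernCbase[3] receives the new descriptor and TBIS() drops any
 * stale translation for that virtual address from the ATC.
 */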
3849 
3850 /*
3851  *	Routine:        pmap_procwr
3852  *
3853  *	Function:
3854  *		Synchronize caches corresponding to [addr, addr+len) in p.
3855  */
3856 void
3857 pmap_procwr(p, va, len)
3858 	struct proc	*p;
3859 	vaddr_t		va;
3860 	size_t		len;
3861 {
3862 	(void)cachectl1(0x80000004, va, len, p);
3863 }
3864 
3865 
3866 #ifdef	PMAP_DEBUG
3867 /************************** DEBUGGING ROUTINES **************************
3868  * The following routines are meant to be an aid to debugging the pmap  *
3869  * system.  They are callable from the DDB command line and should be   *
3870  * prepared to be handed unstable or incomplete states of the system.   *
3871  ************************************************************************/
3872 
3873 /* pv_list
3874  **
3875  * List all pages found on the pv list for the given physical page.
3876  * To avoid endless loops, the listing will stop at the end of the list
3877  * or after 'n' entries - whichever comes first.
3878  */
3879 void
3880 pv_list(pa, n)
3881 	vm_offset_t pa;
3882 	int n;
3883 {
3884 	int  idx;
3885 	vm_offset_t va;
3886 	pv_t *pv;
3887 	c_tmgr_t *c_tbl;
3888 	pmap_t pmap;
3889 
3890 	pv = pa2pv(pa);
3891 	idx = pv->pv_idx;
3892 
3893 	for (;idx != PVE_EOL && n > 0;
3894 		 idx=pvebase[idx].pve_next, n--) {
3895 
3896 		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3897 		printf("idx %d, pmap 0x%x, va 0x%x, c_tbl %x\n",
3898 			idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3899 	}
3900 }
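
/*
 * Typical use from the DDB prompt (a sketch; the address is made up):
 *
 *	db> call pv_list(0x3ff0000, 10)
 *
 * which prints at most ten of the mappings of physical page 0x3ff0000.
 */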
3901 #endif	/* PMAP_DEBUG */
3902 
3903 #ifdef NOT_YET
3904 /* and maybe not ever */
3905 /************************** LOW-LEVEL ROUTINES **************************
3906  * These routines will eventually be rewritten into assembly and placed *
3907  * in locore.s.  They are here now as stubs so that the pmap module can *
3908  * be linked as a standalone user program for testing.                  *
3909  ************************************************************************/
3910 /* flush_atc_crp			INTERNAL
3911  **
3912  * Flush all page descriptors derived from the given CPU Root Pointer
3913  * (CRP), or 'A' table as it is known here, from the 68851's automatic
3914  * cache.
3915  */
3916 void
3917 flush_atc_crp(a_tbl)
3918 {
3919 	mmu_long_rp_t rp;
3920 
3921 	/* Create a temporary root table pointer that points to the
3922 	 * given A table.
3923 	 */
3924 	rp.attr.raw = ~MMU_LONG_RP_LU;
3925 	rp.addr.raw = (unsigned int) a_tbl;
3926 
3927 	mmu_pflushr(&rp);
3928 	/* mmu_pflushr:
3929 	 * 	movel   sp(4)@,a0
3930 	 * 	pflushr a0@
3931 	 *	rts
3932 	 */
3933 }
3934 #endif /* NOT_YET */
3935