/*	$NetBSD: pmap.c,v 1.101 2008/12/10 11:10:19 pooka Exp $	*/

/*-
 * Copyright (c) 1996, 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * XXX These comments aren't quite accurate.  Need to change.
 * The sun3x uses the MC68851 Memory Management Unit, which is built
 * into the CPU.  The 68851 maps virtual to physical addresses using
 * a multi-level table lookup, which is stored in the very memory that
 * it maps.  The number of levels of lookup is configurable from one
 * to four.  In this implementation, we use three, named 'A' through 'C'.
 *
 * The MMU translates virtual addresses into physical addresses by
 * traversing these tables in a process called a 'table walk'.  The most
 * significant 7 bits of the Virtual Address ('VA') being translated are
 * used as an index into the level A table, whose base in physical memory
 * is stored in a special MMU register, the 'CPU Root Pointer' or CRP.  The
 * address found at that index in the A table is used as the base
 * address for the next table, the B table.  The next six bits of the VA are
 * used as an index into the B table, which in turn gives the base address
 * of the third and final C table.
 *
 * The next six bits of the VA are used as an index into the C table to
 * locate a Page Table Entry (PTE).  The PTE is a physical address in memory
 * to which the remaining 13 bits of the VA are added, producing the
 * mapped physical address.
 *
 * To map the entire memory space in this manner would require 2114296 bytes
 * of page tables per process - quite expensive.  Instead we will
 * allocate a fixed but considerably smaller space for the page tables at
 * the time the VM system is initialized.  When the pmap code is asked by
 * the kernel to map a VA to a PA, it allocates tables as needed from this
 * pool.  When there are no more tables in the pool, tables are stolen
 * from the oldest mapped entries in the tree.  This is only possible
 * because all memory mappings are stored in the kernel memory map
 * structures, independent of the pmap structures.  A VA which references
 * one of these invalidated maps will cause a page fault.  The kernel
 * will determine that the page fault was caused by a task using a valid
 * VA, but for some reason (which does not concern it), that address was
 * not mapped.  It will ask the pmap code to re-map the entry and then
 * it will resume executing the faulting task.
 *
 * In this manner the most efficient use of the page table space is
 * achieved.  Tasks which do not execute often will have their tables
 * stolen and reused by tasks which execute more frequently.  The best
 * size for the page table pool will probably be determined by
 * experimentation.
 *
 * You read all of the comments so far.  Good for you.
 * Now go play!
 */
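
/*
 * To make the walk concrete: the sketch below (illustrative only, and
 * kept out of the build) splits a virtual address into the three table
 * indexes and the page offset, using the bit widths given above (7, 6
 * and 6 bits, plus a 13 bit page offset).  The real code uses the
 * MMU_TIA()/MMU_TIB()/MMU_TIC() macros for this; the shift and mask
 * constants here are derived from the comment, not from the headers.
 */
#if 0	/* example only */
static void
mmu_walk_example(vaddr_t va)
{
	u_int a_idx, b_idx, c_idx, off;

	a_idx = (va >> 25) & 0x7f;	/* 7-bit index into the A table  */
	b_idx = (va >> 19) & 0x3f;	/* 6-bit index into the B table  */
	c_idx = (va >> 13) & 0x3f;	/* 6-bit index into the C table  */
	off   = va & 0x1fff;		/* 13-bit offset within the page */

	printf("A=%u B=%u C=%u off=0x%x\n", a_idx, b_idx, c_idx, off);
}
#endif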

/*** A Note About the 68851 Address Translation Cache
 * The MC68851 has a 64 entry cache, called the Address Translation Cache
 * or 'ATC'.  This cache stores the most recently used page descriptors
 * accessed by the MMU when it does translations.  Using a marker called a
 * 'task alias' the MMU can store the descriptors from 8 different table
 * spaces concurrently.  The task alias is associated with the base
 * address of the level A table of that address space.  When an address
 * space is currently active (the CRP currently points to its A table)
 * the only cached descriptors that will be obeyed are ones which have a
 * matching task alias of the current space associated with them.
 *
 * Since the cache is always consulted before any table lookups are done,
 * it is important that it accurately reflect the state of the MMU tables.
 * Whenever a change has been made to a table that has been loaded into
 * the MMU, the code must be sure to flush any cached entries that are
 * affected by the change.  These instances are documented in the code at
 * various points.
 */
/*** A Note About the Note About the 68851 Address Translation Cache
 * 4 months into this code I discovered that the sun3x does not have
 * an MC68851 chip.  Instead, it has a version of this MMU that is part
 * of the 68030 CPU.
 * Although it behaves very similarly to the 68851, it only has 1 task
 * alias and a 22 entry cache.  So sadly (or happily), the first paragraph
 * of the previous note does not apply to the sun3x pmap.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.101 2008/12/10 11:10:19 pooka Exp $");

#include "opt_ddb.h"
#include "opt_pmap_debug.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/kcore.h>

#include <uvm/uvm.h>

#include <machine/cpu.h>
#include <machine/kcore.h>
#include <machine/mon.h>
#include <machine/pmap.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <m68k/cacheops.h>

#include <sun3/sun3/cache.h>
#include <sun3/sun3/machdep.h>

#include "pmap_pvt.h"

/* XXX - What headers declare these? */
extern struct pcb *curpcb;
extern int physmem;

/* Defined in locore.s */
extern char kernel_text[];

/* Defined by the linker */
extern char etext[], edata[], end[];
extern char *esym;	/* DDB */

/*************************** DEBUGGING DEFINITIONS ***********************
 * Macros, preprocessor defines and variables used in debugging can make *
 * code hard to read.  Anything used exclusively for debugging purposes  *
 * is defined here to avoid having such mess scattered around the file.  *
 *************************************************************************/
#ifdef	PMAP_DEBUG
/*
 * To aid the debugging process, macros should be expanded into smaller steps
 * that accomplish the same goal, yet provide convenient places for placing
 * breakpoints.  When this code is compiled with PMAP_DEBUG mode defined, the
 * 'INLINE' keyword is defined to an empty string.  This way, any function
 * defined to be a 'static INLINE' will become 'outlined' and compiled as
 * a separate function, which is much easier to debug.
 */
#define	INLINE	/* nothing */

/*
 * It is sometimes convenient to watch the activity of a particular table
 * in the system.  The following variables are used for that purpose.
 */
a_tmgr_t *pmap_watch_atbl = 0;
b_tmgr_t *pmap_watch_btbl = 0;
c_tmgr_t *pmap_watch_ctbl = 0;

int pmap_debug = 0;
#define DPRINT(args) if (pmap_debug) printf args

#else	/********** Stuff below is defined if NOT debugging **************/

#define	INLINE	inline
#define DPRINT(args)  /* nada */

#endif	/* PMAP_DEBUG */
/*********************** END OF DEBUGGING DEFINITIONS ********************/

/*** Management Structure - Memory Layout
 * For every MMU table in the sun3x pmap system there must be a way to
 * manage it; we must know which process is using it, what other tables
 * depend on it, and whether or not it contains any locked pages.  This
 * is solved by the creation of 'table management'  or 'tmgr'
 * structures.  One for each MMU table in the system.
 *
 *                        MAP OF MEMORY USED BY THE PMAP SYSTEM
 *
 *      towards lower memory
 * kernAbase -> +-------------------------------------------------------+
 *              | Kernel     MMU A level table                          |
 * kernBbase -> +-------------------------------------------------------+
 *              | Kernel     MMU B level tables                         |
 * kernCbase -> +-------------------------------------------------------+
 *              |                                                       |
 *              | Kernel     MMU C level tables                         |
 *              |                                                       |
 * mmuCbase  -> +-------------------------------------------------------+
 *              | User       MMU C level tables                         |
 * mmuAbase  -> +-------------------------------------------------------+
 *              |                                                       |
 *              | User       MMU A level tables                         |
 *              |                                                       |
 * mmuBbase  -> +-------------------------------------------------------+
 *              | User       MMU B level tables                         |
 * tmgrAbase -> +-------------------------------------------------------+
 *              |  TMGR A level table structures                        |
 * tmgrBbase -> +-------------------------------------------------------+
 *              |  TMGR B level table structures                        |
 * tmgrCbase -> +-------------------------------------------------------+
 *              |  TMGR C level table structures                        |
 * pvbase    -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list heads)       |
 * pvebase   -> +-------------------------------------------------------+
 *              |  Physical to Virtual mapping table (list elements)    |
 *              |                                                       |
 *              +-------------------------------------------------------+
 *      towards higher memory
 *
 * For every A table in the MMU A area, there will be a corresponding
 * a_tmgr structure in the TMGR A area.  The same will be true for
 * the B and C tables.  This arrangement will make it easy to find the
 * controlling tmgr structure for any table in the system by use of
 * (relatively) simple macros.
 */
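
/*
 * For example, finding the manager of a given B table is nothing more
 * than pointer arithmetic; a sketch of what the mmuB2tmgr() function
 * further below actually does:
 *
 *	idx    = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
 *	b_tmgr = &Btmgrbase[idx];
 *
 * The A and C pools follow the same pattern.
 */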

/*
 * Global variables for storing the base addresses for the areas
 * labeled above.
 */
static vaddr_t  	kernAphys;
static mmu_long_dte_t	*kernAbase;
static mmu_short_dte_t	*kernBbase;
static mmu_short_pte_t	*kernCbase;
static mmu_short_pte_t	*mmuCbase;
static mmu_short_dte_t	*mmuBbase;
static mmu_long_dte_t	*mmuAbase;
static a_tmgr_t		*Atmgrbase;
static b_tmgr_t		*Btmgrbase;
static c_tmgr_t		*Ctmgrbase;
static pv_t 		*pvbase;
static pv_elem_t	*pvebase;
static struct pmap	kernel_pmap;
struct pmap		*const kernel_pmap_ptr = &kernel_pmap;

/*
 * This holds the CRP currently loaded into the MMU.
 */
struct mmu_rootptr kernel_crp;

/*
 * Just all around global variables.
 */
static TAILQ_HEAD(a_pool_head_struct, a_tmgr_struct) a_pool;
static TAILQ_HEAD(b_pool_head_struct, b_tmgr_struct) b_pool;
static TAILQ_HEAD(c_pool_head_struct, c_tmgr_struct) c_pool;


/*
 * Flags used to mark the safety/availability of certain operations or
 * resources.
 */
/* Safe to use pmap_bootstrap_alloc(). */
static bool bootstrap_alloc_enabled = false;
/* Temporary virtual pages are in use */
int tmp_vpages_inuse;

/*
 * XXX:  For now, retain the traditional variables that were
 * used in the old pmap/vm interface (without NONCONTIG).
 */
/* Kernel virtual address space available: */
vaddr_t	virtual_avail, virtual_end;
/* Physical address space available: */
paddr_t	avail_start, avail_end;

/* This keeps track of the end of the contiguously mapped range. */
vaddr_t virtual_contig_end;

/* Physical address used by pmap_next_page() */
paddr_t avail_next;

/* These are used by pmap_copy_page(), etc. */
vaddr_t tmp_vpages[2];

/* memory pool for pmap structures */
struct pool	pmap_pmap_pool;

/*
 * The 3/80 is the only member of the sun3x family that has non-contiguous
 * physical memory.  Memory is divided into 4 banks which are physically
 * locatable on the system board.  Although the size of these banks varies
 * with the size of memory they contain, their base addresses are
 * permanently fixed.  The following structure, which describes these
 * banks, is initialized by pmap_bootstrap() after it reads from a similar
 * structure provided by the ROM Monitor.
 *
 * For the other machines in the sun3x architecture which do have contiguous
 * RAM, this list will have only one entry, which will describe the entire
 * range of available memory.
 */
struct pmap_physmem_struct avail_mem[SUN3X_NPHYS_RAM_SEGS];
u_int total_phys_mem;

/*************************************************************************/

/*
 * XXX - Should "tune" these based on statistics.
 *
 * My first guess about the relative numbers of these needed is
 * based on the fact that a "typical" process will have several
 * pages mapped at low virtual addresses (text, data, bss), then
 * some mapped shared libraries, and then some stack pages mapped
 * near the high end of the VA space.  Each process can use only
 * one A table, and most will use only two B tables (maybe three)
 * and probably about four C tables.  Therefore, the first guess
 * at the relative numbers of these needed is 1:2:4 -gwr
 *
 * The number of C tables needed is closely related to the amount
 * of physical memory available plus a certain amount attributable
 * to the use of double mappings.  With a few simulation statistics
 * we can find a reasonably good estimation of this unknown value.
 * Armed with that and the above ratios, we have a good idea of what
 * is needed at each level. -j
 *
 * Note: It is not physical memory size, but the total mapped
 * virtual space required by the combined working sets of all the
 * currently _runnable_ processes.  (Sleeping ones don't count.)
 * The amount of physical memory should be irrelevant. -gwr
 */
#ifdef	FIXED_NTABLES
#define NUM_A_TABLES	16
#define NUM_B_TABLES	32
#define NUM_C_TABLES	64
#else
unsigned int	NUM_A_TABLES, NUM_B_TABLES, NUM_C_TABLES;
#endif	/* FIXED_NTABLES */
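
/*
 * A worked example of the non-FIXED_NTABLES sizing computed in
 * pmap_bootstrap() (the 16MB figure is assumed purely for illustration;
 * 8KB pages and 64-entry C tables per the layout described above):
 *
 *	NUM_C_TABLES = (16MB * 4) / (64 * 8KB) = 128
 *	NUM_B_TABLES = 128 / 2                 =  64
 *	NUM_A_TABLES =  64 / 2                 =  32
 *
 * which preserves the 1:2:4 (A:B:C) ratio discussed above.
 */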

/*
 * This determines our total virtual mapping capacity.
 * Yes, it is a FIXED value so we can pre-allocate.
 */
#define NUM_USER_PTES	(NUM_C_TABLES * MMU_C_TBL_SIZE)

/*
 * The size of the Kernel Virtual Address Space (KVAS)
 * for purposes of MMU table allocation is -KERNBASE
 * (length from KERNBASE to 0xFFFFffff)
 */
#define	KVAS_SIZE		(-KERNBASE)

/* Numbers of kernel MMU tables to support KVAS_SIZE. */
#define KERN_B_TABLES	(KVAS_SIZE >> MMU_TIA_SHIFT)
#define KERN_C_TABLES	(KVAS_SIZE >> MMU_TIB_SHIFT)
#define	NUM_KERN_PTES	(KVAS_SIZE >> MMU_TIC_SHIFT)
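
/*
 * Illustrative arithmetic, assuming the sun3x KERNBASE of 0xF8000000
 * (a 128MB KVAS) and TIA/TIB/TIC shifts of 25/19/13 as implied by the
 * table geometry described at the top of this file:
 *
 *	KERN_B_TABLES = 128MB >> 25 =     4
 *	KERN_C_TABLES = 128MB >> 19 =   256
 *	NUM_KERN_PTES = 128MB >> 13 = 16384
 */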

/*************************** MISCELLANEOUS MACROS ************************/
#define pmap_lock(pmap) simple_lock(&pmap->pm_lock)
#define pmap_unlock(pmap) simple_unlock(&pmap->pm_lock)
#define pmap_add_ref(pmap) ++pmap->pm_refcount
#define pmap_del_ref(pmap) --pmap->pm_refcount
#define pmap_refcount(pmap) pmap->pm_refcount

void *pmap_bootstrap_alloc(int);

static INLINE void *mmu_ptov(paddr_t);
static INLINE paddr_t mmu_vtop(void *);

#if	0
static INLINE a_tmgr_t *mmuA2tmgr(mmu_long_dte_t *);
#endif /* 0 */
static INLINE b_tmgr_t *mmuB2tmgr(mmu_short_dte_t *);
static INLINE c_tmgr_t *mmuC2tmgr(mmu_short_pte_t *);

static INLINE pv_t *pa2pv(paddr_t);
static INLINE int   pteidx(mmu_short_pte_t *);
static INLINE pmap_t current_pmap(void);

/*
 * We can always convert between virtual and physical addresses
 * for anything in the range [KERNBASE ... avail_start] because
 * that range is GUARANTEED to be mapped linearly.
 * We rely heavily upon this feature!
 */
static INLINE void *
mmu_ptov(paddr_t pa)
{
	vaddr_t va;

	va = (pa + KERNBASE);
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_ptov");
#endif
	return (void *)va;
}

static INLINE paddr_t
mmu_vtop(void *vva)
{
	vaddr_t va;

	va = (vaddr_t)vva;
#ifdef	PMAP_DEBUG
	if ((va < KERNBASE) || (va >= virtual_contig_end))
		panic("mmu_vtop");
#endif
	return va - KERNBASE;
}

/*
 * These macros map MMU tables to their corresponding manager structures.
 * They are needed quite often because many of the pointers in the pmap
 * system reference MMU tables and not the structures that control them.
 * There needs to be a way to find one when given the other and these
 * macros do so by taking advantage of the memory layout described above.
 * Here's a quick step through the first macro, mmuA2tmgr():
 *
 * 1) find the offset of the given MMU A table from the base of its table
 *    pool (table - mmuAbase).
 * 2) convert this offset into a table index by dividing it by the
 *    size of one MMU 'A' table. (sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE)
 * 3) use this index to select the corresponding 'A' table manager
 *    structure from the 'A' table manager pool (Atmgrbase[index]).
 */
/*  This function is not currently used. */
#if	0
static INLINE a_tmgr_t *
mmuA2tmgr(mmu_long_dte_t *mmuAtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuAtbl - mmuAbase) / MMU_A_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_A_TABLES))
		panic("mmuA2tmgr");
#endif
	return &Atmgrbase[idx];
}
#endif	/* 0 */

static INLINE b_tmgr_t *
mmuB2tmgr(mmu_short_dte_t *mmuBtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuBtbl - mmuBbase) / MMU_B_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_B_TABLES))
		panic("mmuB2tmgr");
#endif
	return &Btmgrbase[idx];
}

/* mmuC2tmgr			INTERNAL
 **
 * Given a pte known to belong to a C table, return the address of
 * that table's management structure.
 */
static INLINE c_tmgr_t *
mmuC2tmgr(mmu_short_pte_t *mmuCtbl)
{
	int idx;

	/* Which table is this in? */
	idx = (mmuCtbl - mmuCbase) / MMU_C_TBL_SIZE;
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= NUM_C_TABLES))
		panic("mmuC2tmgr");
#endif
	return &Ctmgrbase[idx];
}

/* This is now a function call below.
 * #define pa2pv(pa) \
 *	(&pvbase[(unsigned long)\
 *		m68k_btop(pa)\
 *	])
 */

/* pa2pv			INTERNAL
 **
 * Return the pv_list_head element which manages the given physical
 * address.
 */
static INLINE pv_t *
pa2pv(paddr_t pa)
{
	struct pmap_physmem_struct *bank;
	int idx;

	bank = &avail_mem[0];
	while (pa >= bank->pmem_end)
		bank = bank->pmem_next;

	pa -= bank->pmem_start;
	idx = bank->pmem_pvbase + m68k_btop(pa);
#ifdef	PMAP_DEBUG
	if ((idx < 0) || (idx >= physmem))
		panic("pa2pv");
#endif
	return &pvbase[idx];
}

/* pteidx			INTERNAL
 **
 * Return the index of the given PTE within the entire fixed table of
 * PTEs.
 */
static INLINE int
pteidx(mmu_short_pte_t *pte)
{

	return pte - kernCbase;
}

/*
 * This just offers a place to put some debugging checks,
 * and reduces the number of places "curlwp" appears...
 */
static INLINE pmap_t
current_pmap(void)
{
	struct vmspace *vm;
	struct vm_map *map;
	pmap_t	pmap;

	vm = curproc->p_vmspace;
	map = &vm->vm_map;
	pmap = vm_map_pmap(map);

	return pmap;
}


/*************************** FUNCTION DEFINITIONS ************************
 * These appear here merely for the compiler to enforce type checking on *
 * all function calls.                                                   *
 *************************************************************************/

/*
 * Internal functions
 */
a_tmgr_t *get_a_table(void);
b_tmgr_t *get_b_table(void);
c_tmgr_t *get_c_table(void);
int free_a_table(a_tmgr_t *, bool);
int free_b_table(b_tmgr_t *, bool);
int free_c_table(c_tmgr_t *, bool);

void pmap_bootstrap_aalign(int);
void pmap_alloc_usermmu(void);
void pmap_alloc_usertmgr(void);
void pmap_alloc_pv(void);
void pmap_init_a_tables(void);
void pmap_init_b_tables(void);
void pmap_init_c_tables(void);
void pmap_init_pv(void);
void pmap_clear_pv(paddr_t, int);
static INLINE bool is_managed(paddr_t);

bool pmap_remove_a(a_tmgr_t *, vaddr_t, vaddr_t);
bool pmap_remove_b(b_tmgr_t *, vaddr_t, vaddr_t);
bool pmap_remove_c(c_tmgr_t *, vaddr_t, vaddr_t);
void pmap_remove_pte(mmu_short_pte_t *);

void pmap_enter_kernel(vaddr_t, paddr_t, vm_prot_t);
static INLINE void pmap_remove_kernel(vaddr_t, vaddr_t);
static INLINE void pmap_protect_kernel(vaddr_t, vaddr_t, vm_prot_t);
static INLINE bool pmap_extract_kernel(vaddr_t, paddr_t *);
vaddr_t pmap_get_pteinfo(u_int, pmap_t *, c_tmgr_t **);
static INLINE int pmap_dereference(pmap_t);

bool pmap_stroll(pmap_t, vaddr_t, a_tmgr_t **, b_tmgr_t **, c_tmgr_t **,
    mmu_short_pte_t **, int *, int *, int *);
void pmap_bootstrap_copyprom(void);
void pmap_takeover_mmu(void);
void pmap_bootstrap_setprom(void);
static void pmap_page_upload(void);

#ifdef PMAP_DEBUG
/* Debugging function definitions */
void  pv_list(paddr_t, int);
#endif /* PMAP_DEBUG */

/** Interface functions
 ** - functions required by the Mach VM Pmap interface, with MACHINE_CONTIG
 **   defined.
 **   The new UVM doesn't require them so now INTERNAL.
 **/
static INLINE void pmap_pinit(pmap_t);
static INLINE void pmap_release(pmap_t);

/********************************** CODE ********************************
 * Functions that are called from other parts of the kernel are labeled *
 * as 'INTERFACE' functions.  Functions that are only called from       *
 * within the pmap module are labeled as 'INTERNAL' functions.          *
 * Functions that are internal, but are not (currently) used at all are *
 * labeled 'INTERNAL_X'.                                                *
 ************************************************************************/

/* pmap_bootstrap			INTERNAL
 **
 * Initializes the pmap system.  Called at boot time from
 * locore2.c:_vm_init()
 *
 * Reminder: having a pmap_bootstrap_alloc() and also having the VM
 *           system implement pmap_steal_memory() is redundant.
 *           Don't release this code without removing one or the other!
 */
void
pmap_bootstrap(vaddr_t nextva)
{
	struct physmemory *membank;
	struct pmap_physmem_struct *pmap_membank;
	vaddr_t va, eva;
	paddr_t pa;
	int b, c, i, j;	/* running table counts */
	int size, resvmem;

	/*
	 * This function is called by __bootstrap after it has
	 * determined the type of machine and made the appropriate
	 * patches to the ROM vectors (XXX- I don't quite know what I meant
	 * by that.)  It allocates and sets up enough of the pmap system
	 * to manage the kernel's address space.
	 */

	/*
	 * Determine the range of kernel virtual and physical
	 * space available. Note that we ABSOLUTELY DEPEND on
	 * the fact that the first bank of memory (4MB) is
	 * mapped linearly to KERNBASE (which we guaranteed in
	 * the first instructions of locore.s).
	 * That is plenty for our bootstrap work.
	 */
	virtual_avail = m68k_round_page(nextva);
	virtual_contig_end = KERNBASE + 0x400000; /* +4MB */
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	/* Don't need avail_start til later. */

	/* We may now call pmap_bootstrap_alloc(). */
	bootstrap_alloc_enabled = true;

	/*
	 * This is a somewhat unwrapped loop to deal with
	 * copying the PROM's 'physmem' banks into the pmap's
	 * banks.  The following is always assumed:
	 * 1. There is always at least one bank of memory.
	 * 2. There is always a last bank of memory, and its
	 *    pmem_next member must be set to NULL.
	 */
	membank = romVectorPtr->v_physmemory;
	pmap_membank = avail_mem;
	total_phys_mem = 0;

	for (;;) { /* break on !membank */
		pmap_membank->pmem_start = membank->address;
		pmap_membank->pmem_end = membank->address + membank->size;
		total_phys_mem += membank->size;
		membank = membank->next;
		if (!membank)
			break;
		/* This silly syntax arises because pmap_membank
		 * is really a pre-allocated array, but it is put into
		 * use as a linked list.
		 */
		pmap_membank->pmem_next = pmap_membank + 1;
		pmap_membank = pmap_membank->pmem_next;
	}
	/* This is the last element. */
	pmap_membank->pmem_next = NULL;

	/*
	 * Note: total_phys_mem, physmem represent
	 * actual physical memory, including that
	 * reserved for the PROM monitor.
	 */
	physmem = btoc(total_phys_mem);

	/*
	 * Avail_end is set to the first byte of physical memory
	 * after the end of the last bank.  We use this only to
	 * determine if a physical address is "managed" memory.
	 * This address range should be reduced to prevent the
	 * physical pages needed by the PROM monitor from being used
	 * in the VM system.
	 */
	resvmem = total_phys_mem - *(romVectorPtr->memoryAvail);
	resvmem = m68k_round_page(resvmem);
	avail_end = pmap_membank->pmem_end - resvmem;

	/*
	 * First allocate enough kernel MMU tables to map all
	 * of kernel virtual space from KERNBASE to 0xFFFFFFFF.
	 * Note: All must be aligned on 256 byte boundaries.
	 * Start with the level-A table (one of those).
	 */
	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE;
	kernAbase = pmap_bootstrap_alloc(size);
	memset(kernAbase, 0, size);

	/* Now the level-B kernel tables... */
	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE * KERN_B_TABLES;
	kernBbase = pmap_bootstrap_alloc(size);
	memset(kernBbase, 0, size);

	/* Now the level-C kernel tables... */
	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE * KERN_C_TABLES;
	kernCbase = pmap_bootstrap_alloc(size);
	memset(kernCbase, 0, size);
	/*
	 * Note: In order for the PV system to work correctly, the kernel
	 * and user-level C tables must be allocated contiguously.
	 * Nothing should be allocated between here and the allocation of
	 * mmuCbase below.  XXX: Should do this as one allocation, and
	 * then compute a pointer for mmuCbase instead of this...
	 *
	 * Allocate user MMU tables.
	 * These must be contiguous with the preceding.
	 */

#ifndef	FIXED_NTABLES
	/*
	 * The number of user-level C tables that should be allocated is
	 * related to the size of physical memory.  In general, there should
	 * be enough tables to map four times the amount of available RAM.
	 * The extra amount is needed because some table space is wasted by
	 * fragmentation.
	 */
	NUM_C_TABLES = (total_phys_mem * 4) / (MMU_C_TBL_SIZE * MMU_PAGE_SIZE);
	NUM_B_TABLES = NUM_C_TABLES / 2;
	NUM_A_TABLES = NUM_B_TABLES / 2;
#endif	/* !FIXED_NTABLES */

	size = sizeof(mmu_short_pte_t) * MMU_C_TBL_SIZE	* NUM_C_TABLES;
	mmuCbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_short_dte_t) * MMU_B_TBL_SIZE	* NUM_B_TABLES;
	mmuBbase = pmap_bootstrap_alloc(size);

	size = sizeof(mmu_long_dte_t) * MMU_A_TBL_SIZE * NUM_A_TABLES;
	mmuAbase = pmap_bootstrap_alloc(size);

	/*
	 * Fill in the never-changing part of the kernel tables.
	 * For simplicity, the kernel's mappings will be editable as a
	 * flat array of page table entries at kernCbase.  The
	 * higher level 'A' and 'B' tables must be initialized to point
	 * to this lower one.
	 */
	b = c = 0;

	/*
	 * Invalidate all mappings below KERNBASE in the A table.
	 * This area has already been zeroed out, but it is good
	 * practice to explicitly show that we are interpreting
	 * it as a list of A table descriptors.
	 */
	for (i = 0; i < MMU_TIA(KERNBASE); i++) {
		kernAbase[i].addr.raw = 0;
	}

	/*
	 * Set up the kernel A and B tables so that they will reference the
	 * correct spots in the contiguous table of PTEs allocated for the
	 * kernel's virtual memory space.
	 */
	for (i = MMU_TIA(KERNBASE); i < MMU_A_TBL_SIZE; i++) {
		kernAbase[i].attr.raw =
		    MMU_LONG_DTE_LU | MMU_LONG_DTE_SUPV | MMU_DT_SHORT;
		kernAbase[i].addr.raw = mmu_vtop(&kernBbase[b]);

		for (j = 0; j < MMU_B_TBL_SIZE; j++) {
			kernBbase[b + j].attr.raw =
			    mmu_vtop(&kernCbase[c]) | MMU_DT_SHORT;
			c += MMU_C_TBL_SIZE;
		}
		b += MMU_B_TBL_SIZE;
	}

	pmap_alloc_usermmu();	/* Allocate user MMU tables.        */
	pmap_alloc_usertmgr();	/* Allocate user MMU table managers.*/
	pmap_alloc_pv();	/* Allocate physical->virtual map.  */

	/*
	 * We are now done with pmap_bootstrap_alloc().  Round up
	 * `virtual_avail' to the nearest page, and set the flag
	 * to prevent use of pmap_bootstrap_alloc() hereafter.
	 */
	pmap_bootstrap_aalign(PAGE_SIZE);
	bootstrap_alloc_enabled = false;

	/*
	 * Now that we are done with pmap_bootstrap_alloc(), we
	 * must save the virtual and physical addresses of the
	 * end of the linearly mapped range, which are stored in
	 * virtual_contig_end and avail_start, respectively.
	 * These variables will never change after this point.
	 */
	virtual_contig_end = virtual_avail;
	avail_start = virtual_avail - KERNBASE;

	/*
	 * `avail_next' is a running pointer used by pmap_next_page() to
	 * keep track of the next available physical page to be handed
	 * to the VM system during its initialization, in which it
	 * asks for physical pages, one at a time.
	 */
	avail_next = avail_start;

	/*
	 * Now allocate some virtual addresses, but not the physical pages
	 * behind them.  Note that virtual_avail is already page-aligned.
	 *
	 * tmp_vpages[] is an array of two virtual pages used for temporary
	 * kernel mappings in the pmap module to facilitate various physical
	 * address-oriented operations.
	 */
	tmp_vpages[0] = virtual_avail;
	virtual_avail += PAGE_SIZE;
	tmp_vpages[1] = virtual_avail;
	virtual_avail += PAGE_SIZE;

	/** Initialize the PV system **/
	pmap_init_pv();

	/*
	 * Fill in the kernel_pmap structure and kernel_crp.
	 */
	kernAphys = mmu_vtop(kernAbase);
	kernel_pmap.pm_a_tmgr = NULL;
	kernel_pmap.pm_a_phys = kernAphys;
	kernel_pmap.pm_refcount = 1; /* always in use */
	simple_lock_init(&kernel_pmap.pm_lock);

	kernel_crp.rp_attr = MMU_LONG_DTE_LU | MMU_DT_LONG;
	kernel_crp.rp_addr = kernAphys;

	/*
	 * Now pmap_enter_kernel() may be used safely and will be
	 * the main interface used hereafter to modify the kernel's
	 * virtual address space.  Note that since we are still running
	 * under the PROM's address table, none of these table modifications
	 * actually take effect until pmap_takeover_mmu() is called.
	 *
	 * Note: Our tables do NOT have the PROM linear mappings!
	 * Only the mappings created here exist in our tables, so
	 * remember to map anything we expect to use.
	 */
	va = (vaddr_t)KERNBASE;
	pa = 0;

	/*
	 * The first page of the kernel virtual address space is the msgbuf
	 * page.  The page attributes (data, non-cached) are set here, while
	 * the address is assigned to this global pointer in cpu_startup().
	 * It is non-cached, mostly due to paranoia.
	 */
	pmap_enter_kernel(va, pa|PMAP_NC, VM_PROT_ALL);
	va += PAGE_SIZE;
	pa += PAGE_SIZE;

	/* Next page is used as the temporary stack. */
	pmap_enter_kernel(va, pa, VM_PROT_ALL);
	va += PAGE_SIZE;
	pa += PAGE_SIZE;

	/*
	 * Map all of the kernel's text segment as read-only and cacheable.
	 * (Cacheable is implied by default).  Unfortunately, the last bytes
	 * of kernel text and the first bytes of kernel data will often be
	 * sharing the same page.  Therefore, the last page of kernel text
	 * has to be mapped as read/write, to accommodate the data.
	 */
	eva = m68k_trunc_page((vaddr_t)etext);
	for (; va < eva; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_EXECUTE);

	/*
	 * Map all of the kernel's data as read/write and cacheable.
	 * This includes: data, BSS, symbols, and everything in the
	 * contiguous memory used by pmap_bootstrap_alloc()
	 */
	for (; pa < avail_start; va += PAGE_SIZE, pa += PAGE_SIZE)
		pmap_enter_kernel(va, pa, VM_PROT_READ|VM_PROT_WRITE);

	/*
	 * At this point we are almost ready to take over the MMU.  But first
	 * we must save the PROM's address space in our map, as we call its
	 * routines and make references to its data later in the kernel.
	 */
	pmap_bootstrap_copyprom();
	pmap_takeover_mmu();
	pmap_bootstrap_setprom();

	/* Notify the VM system of our page size. */
	uvmexp.pagesize = PAGE_SIZE;
	uvm_setpagesize();

	pmap_page_upload();
}


/* pmap_alloc_usermmu			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate MMU tables that will
 * eventually be used for user mappings.
 */
void
pmap_alloc_usermmu(void)
{

	/* XXX: Moved into caller. */
}

/* pmap_alloc_pv			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the physical
 * to virtual mapping list.  Each physical page of memory
 * in the system has a corresponding element in this list.
 */
void
pmap_alloc_pv(void)
{
	int	i;
	unsigned int	total_mem;

	/*
	 * Allocate a pv_head structure for every page of physical
	 * memory that will be managed by the system.  Since memory on
	 * the 3/80 is non-contiguous, we cannot arrive at a total page
	 * count by subtraction of the lowest available address from the
	 * highest, but rather we have to step through each memory
	 * bank and add the number of pages in each to the total.
	 *
	 * At this time we also initialize the offset of each bank's
	 * starting pv_head within the pv_head list so that the physical
	 * memory state routines (pmap_is_referenced(),
	 * pmap_is_modified(), et al.) can quickly find corresponding
	 * pv_heads in spite of the non-contiguity.
	 */
	total_mem = 0;
	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
		avail_mem[i].pmem_pvbase = m68k_btop(total_mem);
		total_mem += avail_mem[i].pmem_end - avail_mem[i].pmem_start;
		if (avail_mem[i].pmem_next == NULL)
			break;
	}
	pvbase = (pv_t *)pmap_bootstrap_alloc(sizeof(pv_t) *
	    m68k_btop(total_phys_mem));
}

/* pmap_alloc_usertmgr			INTERNAL
 **
 * Called from pmap_bootstrap() to allocate the structures which
 * facilitate management of user MMU tables.  Each user MMU table
 * in the system has one such structure associated with it.
 */
void
pmap_alloc_usertmgr(void)
{
	/* Allocate user MMU table managers */
	/* It would be a lot simpler to just make these BSS, but */
	/* we may want to change their size at boot time... -j */
	Atmgrbase =
	    (a_tmgr_t *)pmap_bootstrap_alloc(sizeof(a_tmgr_t) * NUM_A_TABLES);
	Btmgrbase =
	    (b_tmgr_t *)pmap_bootstrap_alloc(sizeof(b_tmgr_t) * NUM_B_TABLES);
	Ctmgrbase =
	    (c_tmgr_t *)pmap_bootstrap_alloc(sizeof(c_tmgr_t) * NUM_C_TABLES);

	/*
	 * Allocate PV list elements for the physical to virtual
	 * mapping system.
	 */
	pvebase = (pv_elem_t *)pmap_bootstrap_alloc(sizeof(pv_elem_t) *
	    (NUM_USER_PTES + NUM_KERN_PTES));
}

/* pmap_bootstrap_copyprom()			INTERNAL
 **
 * Copy the PROM mappings into our own tables.  Note, we
 * can use physical addresses until __bootstrap returns.
 */
void
pmap_bootstrap_copyprom(void)
{
	struct sunromvec *romp;
	int *mon_ctbl;
	mmu_short_pte_t *kpte;
	int i, len;

	romp = romVectorPtr;

	/*
	 * Copy the mappings in SUN3X_MON_KDB_BASE...SUN3X_MONEND
	 * Note: mon_ctbl[0] maps SUN3X_MON_KDB_BASE
	 */
	mon_ctbl = *romp->monptaddr;
	i = m68k_btop(SUN3X_MON_KDB_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MONEND - SUN3X_MON_KDB_BASE);

	for (i = 0; i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}

	/*
	 * Copy the mappings at MON_DVMA_BASE (to the end).
	 * Note, in here, mon_ctbl[0] maps MON_DVMA_BASE.
	 * Actually, we only want the last page, which the
	 * PROM has set up for use by the "ie" driver.
	 * (The i82586 needs its SCP there.)
	 * If we copy all the mappings, pmap_enter_kernel
	 * may complain about finding valid PTEs that are
	 * not recorded in our PV lists...
	 */
	mon_ctbl = *romp->shadowpteaddr;
	i = m68k_btop(SUN3X_MON_DVMA_BASE - KERNBASE);
	kpte = &kernCbase[i];
	len = m68k_btop(SUN3X_MON_DVMA_SIZE);
	for (i = (len - 1); i < len; i++) {
		kpte[i].attr.raw = mon_ctbl[i];
	}
}

/* pmap_takeover_mmu			INTERNAL
 **
 * Called from pmap_bootstrap() after it has copied enough of the
 * PROM mappings into the kernel map so that we can use our own
 * MMU table.
 */
void
pmap_takeover_mmu(void)
{

	loadcrp(&kernel_crp);
}

/* pmap_bootstrap_setprom()			INTERNAL
 **
 * Set the PROM mappings so it can see kernel space.
 * Note that physical addresses are used here, which
 * we can get away with because this runs with the
 * low 1GB set for transparent translation.
 */
void
pmap_bootstrap_setprom(void)
{
	mmu_long_dte_t *mon_dte;
	extern struct mmu_rootptr mon_crp;
	int i;

	mon_dte = (mmu_long_dte_t *)mon_crp.rp_addr;
	for (i = MMU_TIA(KERNBASE); i < MMU_TIA(KERN_END); i++) {
		mon_dte[i].attr.raw = kernAbase[i].attr.raw;
		mon_dte[i].addr.raw = kernAbase[i].addr.raw;
	}
}


/* pmap_init			INTERFACE
 **
 * Called at the end of vm_init() to set up the pmap system to go
 * into full time operation.  All initialization of kernel_pmap
 * should be already done by now, so this should just do things
 * needed for user-level pmaps to work.
 */
void
pmap_init(void)
{

	/** Initialize the manager pools **/
	TAILQ_INIT(&a_pool);
	TAILQ_INIT(&b_pool);
	TAILQ_INIT(&c_pool);

	/**************************************************************
	 * Initialize all tmgr structures and MMU tables they manage. *
	 **************************************************************/
	/** Initialize A tables **/
	pmap_init_a_tables();
	/** Initialize B tables **/
	pmap_init_b_tables();
	/** Initialize C tables **/
	pmap_init_c_tables();

	/** Initialize the pmap pools **/
	pool_init(&pmap_pmap_pool, sizeof(struct pmap), 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);
}

/* pmap_init_a_tables()			INTERNAL
 **
 * Initializes all A managers, their MMU A tables, and inserts
 * them into the A manager pool for use by the system.
 */
void
pmap_init_a_tables(void)
{
	int i;
	a_tmgr_t *a_tbl;

	for (i = 0; i < NUM_A_TABLES; i++) {
		/* Select the next available A manager from the pool */
		a_tbl = &Atmgrbase[i];

		/*
		 * Clear its parent entry.  Set its wired and valid
		 * entry count to zero.
		 */
		a_tbl->at_parent = NULL;
		a_tbl->at_wcnt = a_tbl->at_ecnt = 0;

		/* Assign it the next available MMU A table from the pool */
		a_tbl->at_dtbl = &mmuAbase[i * MMU_A_TBL_SIZE];

		/*
		 * Initialize the MMU A table with the table in the `proc0',
		 * or kernel, mapping.  This ensures that every process has
		 * the kernel mapped in the top part of its address space.
		 */
		memcpy(a_tbl->at_dtbl, kernAbase,
		    MMU_A_TBL_SIZE * sizeof(mmu_long_dte_t));

		/*
		 * Finally, insert the manager into the A pool,
		 * making it ready to be used by the system.
		 */
		TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
	}
}

/* pmap_init_b_tables()			INTERNAL
 **
 * Initializes all B table managers, their MMU B tables, and
 * inserts them into the B manager pool for use by the system.
 */
void
pmap_init_b_tables(void)
{
	int i, j;
	b_tmgr_t *b_tbl;

	for (i = 0; i < NUM_B_TABLES; i++) {
		/* Select the next available B manager from the pool */
		b_tbl = &Btmgrbase[i];

		b_tbl->bt_parent = NULL;	/* clear its parent,  */
		b_tbl->bt_pidx = 0;		/* parent index,      */
		b_tbl->bt_wcnt = 0;		/* wired entry count, */
		b_tbl->bt_ecnt = 0;		/* valid entry count. */

		/* Assign it the next available MMU B table from the pool */
		b_tbl->bt_dtbl = &mmuBbase[i * MMU_B_TBL_SIZE];

		/* Invalidate every descriptor in the table */
		for (j = 0; j < MMU_B_TBL_SIZE; j++)
			b_tbl->bt_dtbl[j].attr.raw = MMU_DT_INVALID;

		/* Insert the manager into the B pool */
		TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
	}
}

/* pmap_init_c_tables()			INTERNAL
 **
 * Initializes all C table managers, their MMU C tables, and
 * inserts them into the C manager pool for use by the system.
 */
void
pmap_init_c_tables(void)
{
	int i, j;
	c_tmgr_t *c_tbl;

	for (i = 0; i < NUM_C_TABLES; i++) {
		/* Select the next available C manager from the pool */
		c_tbl = &Ctmgrbase[i];

		c_tbl->ct_parent = NULL;	/* clear its parent,  */
		c_tbl->ct_pidx = 0;		/* parent index,      */
		c_tbl->ct_wcnt = 0;		/* wired entry count, */
		c_tbl->ct_ecnt = 0;		/* valid entry count, */
		c_tbl->ct_pmap = NULL;		/* parent pmap,       */
		c_tbl->ct_va = 0;		/* base of managed range */

		/* Assign it the next available MMU C table from the pool */
		c_tbl->ct_dtbl = &mmuCbase[i * MMU_C_TBL_SIZE];

		for (j = 0; j < MMU_C_TBL_SIZE; j++)
			c_tbl->ct_dtbl[j].attr.raw = MMU_DT_INVALID;

		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
	}
}

/* pmap_init_pv()			INTERNAL
 **
 * Initializes the Physical to Virtual mapping system.
 */
void
pmap_init_pv(void)
{
	int i;

	/* Initialize every PV head. */
	for (i = 0; i < m68k_btop(total_phys_mem); i++) {
		pvbase[i].pv_idx = PVE_EOL;	/* Indicate no mappings */
		pvbase[i].pv_flags = 0;		/* Zero out page flags  */
	}
}

/* is_managed				INTERNAL
 **
 * Determine if the given physical address is managed by the PV system.
 * Note that this logic assumes that no one will ask for the status of
 * addresses which lie in-between the memory banks on the 3/80.  If they
 * do so, it will falsely report that it is managed.
 *
 * Note: A "managed" address is one that was reported to the VM system as
 * a "usable page" during system startup.  As such, the VM system expects the
 * pmap module to keep accurate track of the usage of those pages.
 * Any page not given to the VM system at startup does not exist (as far as
 * the VM system is concerned) and is therefore "unmanaged."  Examples are
 * those pages which belong to the ROM monitor and the memory allocated before
 * the VM system was started.
 */
static INLINE bool
is_managed(paddr_t pa)
{
	if (pa >= avail_start && pa < avail_end)
		return true;
	else
		return false;
}

/* get_a_table			INTERNAL
 **
 * Retrieve and return a level A table for use in a user map.
 */
a_tmgr_t *
get_a_table(void)
{
	a_tmgr_t *tbl;
	pmap_t pmap;

	/* Get the top A table in the pool */
	tbl = TAILQ_FIRST(&a_pool);
	if (tbl == NULL) {
		/*
		 * XXX - Instead of panicking here and in other get_x_table
		 * functions, we do have the option of sleeping on the head of
		 * the table pool.  Any function which updates the table pool
		 * would then issue a wakeup() on the head, thus waking up any
		 * processes waiting for a table.
		 *
		 * Actually, the place to sleep would be when some process
		 * asks for a "wired" mapping that would run us short of
		 * mapping resources.  This design DEPENDS on always having
		 * some mapping resources in the pool for stealing, so we
		 * must make sure we NEVER let the pool become empty. -gwr
		 */
		panic("get_a_table: out of A tables.");
	}

	TAILQ_REMOVE(&a_pool, tbl, at_link);
	/*
	 * If the table has a non-null parent pointer then it is in use.
	 * Forcibly abduct it from its parent and clear its entries.
	 * No re-entrancy worries here.  This table would not be in the
	 * table pool unless it was available for use.
	 *
	 * Note that the second argument to free_a_table() is false.  This
	 * indicates that the table should not be relinked into the A table
	 * pool.  That is a job for the function that called us.
	 */
	if (tbl->at_parent) {
		KASSERT(tbl->at_wcnt == 0);
		pmap = tbl->at_parent;
		free_a_table(tbl, false);
		pmap->pm_a_tmgr = NULL;
		pmap->pm_a_phys = kernAphys;
	}
	return tbl;
}

/* get_b_table			INTERNAL
 **
 * Return a level B table for use.
 */
b_tmgr_t *
get_b_table(void)
{
	b_tmgr_t *tbl;

	/* See 'get_a_table' for comments. */
	tbl = TAILQ_FIRST(&b_pool);
	if (tbl == NULL)
		panic("get_b_table: out of B tables.");
	TAILQ_REMOVE(&b_pool, tbl, bt_link);
	if (tbl->bt_parent) {
		KASSERT(tbl->bt_wcnt == 0);
		tbl->bt_parent->at_dtbl[tbl->bt_pidx].attr.raw = MMU_DT_INVALID;
		tbl->bt_parent->at_ecnt--;
		free_b_table(tbl, false);
	}
	return tbl;
}

/* get_c_table			INTERNAL
 **
 * Return a level C table for use.
 */
c_tmgr_t *
get_c_table(void)
{
	c_tmgr_t *tbl;

	/* See 'get_a_table' for comments */
	tbl = TAILQ_FIRST(&c_pool);
	if (tbl == NULL)
		panic("get_c_table: out of C tables.");
	TAILQ_REMOVE(&c_pool, tbl, ct_link);
	if (tbl->ct_parent) {
		KASSERT(tbl->ct_wcnt == 0);
		tbl->ct_parent->bt_dtbl[tbl->ct_pidx].attr.raw = MMU_DT_INVALID;
		tbl->ct_parent->bt_ecnt--;
		free_c_table(tbl, false);
	}
	return tbl;
}

/*
 * The following 'free_table' and 'steal_table' functions are called to
 * detach tables from their current obligations (parents and children) and
 * prepare them for reuse in another mapping.
 *
 * Free_table is used when the calling function will handle the fate
 * of the parent table, such as returning it to the free pool when it has
 * no valid entries.  Functions that do not want to handle this should
 * call steal_table, in which the parent table's descriptors and entry
 * count are automatically modified when this table is removed.
 */

/* free_a_table			INTERNAL
 **
 * Unmaps the given A table and all child tables from their current
 * mappings.  Returns the number of pages that were invalidated.
 * If 'relink' is true, the function will return the table to the head
 * of the available table pool.
 *
 * Cache note: The MC68851 will automatically flush all
 * descriptors derived from a given A table from its
 * Automatic Translation Cache (ATC) if we issue a
 * 'PFLUSHR' instruction with the base address of the
 * table.  This function should do so, and does.
 * Note note: We are using an MC68030 - there is no
 * PFLUSHR.
 */
int
free_a_table(a_tmgr_t *a_tbl, bool relink)
{
	int i, removed_cnt;
	mmu_long_dte_t	*dte;
	mmu_short_dte_t *dtbl;
	b_tmgr_t	*b_tbl;
	uint8_t at_wired, bt_wired;

	/*
	 * Flush the ATC cache of all cached descriptors derived
	 * from this table.
	 * Sun3x does not use 68851's cached table feature
	 * flush_atc_crp(mmu_vtop(a_tbl->dte));
	 */

	/*
	 * Remove any pending cache flushes that were designated
	 * for the pmap this A table belongs to.
	 * a_tbl->parent->atc_flushq[0] = 0;
	 * Not implemented in sun3x.
	 */

	/*
	 * All A tables in the system should retain a map for the
	 * kernel. If the table contains any valid descriptors
	 * (other than those for the kernel area), invalidate them all,
	 * stopping short of the kernel's entries.
	 */
	removed_cnt = 0;
	at_wired = a_tbl->at_wcnt;
	if (a_tbl->at_ecnt) {
		dte = a_tbl->at_dtbl;
		for (i = 0; i < MMU_TIA(KERNBASE); i++) {
			/*
			 * If a table entry points to a valid B table, free
			 * it and its children.
			 */
			if (MMU_VALID_DT(dte[i])) {
				/*
				 * The following block does several things,
				 * from innermost expression to the
				 * outermost:
				 * 1) It extracts the base address of
				 *    the B table pointed to in the A
				 *    table entry dte[i].
				 * 2) It converts this base address into
				 *    the virtual address it can be
				 *    accessed with. (all MMU tables point
				 *    to physical addresses.)
				 * 3) It finds the corresponding manager
				 *    structure which manages this MMU table.
				 * 4) It frees the manager structure.
				 *    (This frees the MMU table and all
				 *    child tables. See 'free_b_table' for
				 *    details.)
				 */
				dtbl = mmu_ptov(dte[i].addr.raw);
				b_tbl = mmuB2tmgr(dtbl);
				bt_wired = b_tbl->bt_wcnt;
				removed_cnt += free_b_table(b_tbl, true);
				if (bt_wired)
					a_tbl->at_wcnt--;
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		a_tbl->at_ecnt = 0;
	}
	KASSERT(a_tbl->at_wcnt == 0);

	if (relink) {
		a_tbl->at_parent = NULL;
		if (!at_wired)
			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
	}
	return removed_cnt;
}

/* free_b_table			INTERNAL
 **
 * Unmaps the given B table and all its children from their current
 * mappings.  Returns the number of pages that were invalidated.
 * (For comments, see 'free_a_table()').
 */
int
free_b_table(b_tmgr_t *b_tbl, bool relink)
{
	int i, removed_cnt;
	mmu_short_dte_t *dte;
	mmu_short_pte_t	*dtbl;
	c_tmgr_t	*c_tbl;
	uint8_t bt_wired, ct_wired;

	removed_cnt = 0;
	bt_wired = b_tbl->bt_wcnt;
	if (b_tbl->bt_ecnt) {
		dte = b_tbl->bt_dtbl;
		for (i = 0; i < MMU_B_TBL_SIZE; i++) {
			if (MMU_VALID_DT(dte[i])) {
				dtbl = mmu_ptov(MMU_DTE_PA(dte[i]));
				c_tbl = mmuC2tmgr(dtbl);
				ct_wired = c_tbl->ct_wcnt;
				removed_cnt += free_c_table(c_tbl, true);
				if (ct_wired)
					b_tbl->bt_wcnt--;
				dte[i].attr.raw = MMU_DT_INVALID;
			}
		}
		b_tbl->bt_ecnt = 0;
	}
	KASSERT(b_tbl->bt_wcnt == 0);

	if (relink) {
		b_tbl->bt_parent = NULL;
		if (!bt_wired)
			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
	}
	return removed_cnt;
}

/* free_c_table			INTERNAL
 **
 * Unmaps the given C table from use and returns it to the pool for
 * re-use.  Returns the number of pages that were invalidated.
 *
 * This function preserves any physical page modification information
 * contained in the page descriptors within the C table by calling
 * 'pmap_remove_pte().'
 */
int
free_c_table(c_tmgr_t *c_tbl, bool relink)
{
	mmu_short_pte_t *c_pte;
	int i, removed_cnt;
	uint8_t ct_wired;

	removed_cnt = 0;
	ct_wired = c_tbl->ct_wcnt;
	if (c_tbl->ct_ecnt) {
		for (i = 0; i < MMU_C_TBL_SIZE; i++) {
			c_pte = &c_tbl->ct_dtbl[i];
			if (MMU_VALID_DT(*c_pte)) {
				if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)
					c_tbl->ct_wcnt--;
				pmap_remove_pte(c_pte);
				removed_cnt++;
			}
		}
		c_tbl->ct_ecnt = 0;
	}
	KASSERT(c_tbl->ct_wcnt == 0);

	if (relink) {
		c_tbl->ct_parent = NULL;
		if (!ct_wired)
			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
	}
	return removed_cnt;
}


/* pmap_remove_pte			INTERNAL
 **
 * Unmap the given pte and preserve any page modification
 * information by transferring it to the pv head of the
1535  * physical page it maps to.  This function does not update
1536  * any reference counts because it is assumed that the calling
1537  * function will do so.
1538  */
1539 void
1540 pmap_remove_pte(mmu_short_pte_t *pte)
1541 {
1542 	u_short     pv_idx, targ_idx;
1543 	paddr_t     pa;
1544 	pv_t       *pv;
1545 
1546 	pa = MMU_PTE_PA(*pte);
1547 	if (is_managed(pa)) {
1548 		pv = pa2pv(pa);
1549 		targ_idx = pteidx(pte);	/* Index of PTE being removed    */
1550 
1551 		/*
1552 		 * If the PTE being removed is the first (or only) PTE in
1553 		 * the list of PTEs currently mapped to this page, remove the
1554 		 * PTE by changing the index found on the PV head.  Otherwise
1555 		 * a linear search through the list will have to be executed
1556 		 * in order to find the PVE which points to the PTE being
1557 		 * removed, so that it may be modified to point to its new
1558 		 * neighbor.
1559 		 */
1560 
1561 		pv_idx = pv->pv_idx;	/* Index of first PTE in PV list */
1562 		if (pv_idx == targ_idx) {
1563 			pv->pv_idx = pvebase[targ_idx].pve_next;
1564 		} else {
1565 
1566 			/*
1567 			 * Find the PV element pointing to the target
1568 			 * element.  Note: may have pv_idx==PVE_EOL
1569 			 */
1570 
1571 			for (;;) {
1572 				if (pv_idx == PVE_EOL) {
1573 					goto pv_not_found;
1574 				}
1575 				if (pvebase[pv_idx].pve_next == targ_idx)
1576 					break;
1577 				pv_idx = pvebase[pv_idx].pve_next;
1578 			}
1579 
1580 			/*
1581 			 * At this point, pv_idx is the index of the PV
1582 			 * element just before the target element in the list.
1583 			 * Unlink the target.
1584 			 */
1585 
1586 			pvebase[pv_idx].pve_next = pvebase[targ_idx].pve_next;
1587 		}
1588 
1589 		/*
1590 		 * Save the mod/ref bits of the pte by simply
1591 		 * ORing the entire pte onto the pv_flags member
1592 		 * of the pv structure.
1593 		 * There is no need for the pv head to use a bit
1594 		 * pattern for usage information different from the
1595 		 * one used on the MMU ptes.
1596 		 */
1597 
1598  pv_not_found:
1599 		pv->pv_flags |= (u_short) pte->attr.raw;
1600 	}
1601 	pte->attr.raw = MMU_DT_INVALID;
1602 }
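
/*
 * Illustrative sketch of the PV bookkeeping above (index values are
 * hypothetical).  Suppose the page at 'pa' is currently mapped by the
 * PTEs at indices 5, 9 and 2, giving the singly-linked index list:
 *
 *	pv->pv_idx == 5
 *	pvebase[5].pve_next == 9
 *	pvebase[9].pve_next == 2
 *	pvebase[2].pve_next == PVE_EOL
 *
 * Removing the PTE at index 9 walks from the head until
 * pvebase[pv_idx].pve_next == 9 (here pv_idx == 5), then splices the
 * target out:
 *
 *	pvebase[5].pve_next = pvebase[9].pve_next;	now 2
 */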
1603 
1604 /* pmap_stroll			INTERNAL
1605  **
1606  * Retrieve the addresses of all table managers involved in the mapping of
1607  * the given virtual address.  If the table walk completed successfully,
1608  * return true.  If it was only partially successful, return false.
1609  * The table walk performed by this function is important to many other
1610  * functions in this module.
1611  *
1612  * Note: This function ought to be easier to read.
1613  */
1614 bool
1615 pmap_stroll(pmap_t pmap, vaddr_t va, a_tmgr_t **a_tbl, b_tmgr_t **b_tbl,
1616     c_tmgr_t **c_tbl, mmu_short_pte_t **pte, int *a_idx, int *b_idx,
1617     int *pte_idx)
1618 {
1619 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
1620 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
1621 
1622 	if (pmap == pmap_kernel())
1623 		return false;
1624 
1625 	/* Does the given pmap have its own A table? */
1626 	*a_tbl = pmap->pm_a_tmgr;
1627 	if (*a_tbl == NULL)
1628 		return false; /* No.  Return unknown. */
1629 	/* Does the A table have a valid B table
1630 	 * under the corresponding table entry?
1631 	 */
1632 	*a_idx = MMU_TIA(va);
1633 	a_dte = &((*a_tbl)->at_dtbl[*a_idx]);
1634 	if (!MMU_VALID_DT(*a_dte))
1635 		return false; /* No. Return unknown. */
1636 	/* Yes. Extract B table from the A table. */
1637 	*b_tbl = mmuB2tmgr(mmu_ptov(a_dte->addr.raw));
1638 	/*
1639 	 * Does the B table have a valid C table
1640 	 * under the corresponding table entry?
1641 	 */
1642 	*b_idx = MMU_TIB(va);
1643 	b_dte = &((*b_tbl)->bt_dtbl[*b_idx]);
1644 	if (!MMU_VALID_DT(*b_dte))
1645 		return false; /* No. Return unknown. */
1646 	/* Yes. Extract C table from the B table. */
1647 	*c_tbl = mmuC2tmgr(mmu_ptov(MMU_DTE_PA(*b_dte)));
1648 	*pte_idx = MMU_TIC(va);
1649 	*pte = &((*c_tbl)->ct_dtbl[*pte_idx]);
1650 
1651 	return true;
1652 }
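
/*
 * Usage sketch (hypothetical caller; pmap_extract() later in this file
 * uses the same pattern).  The out-parameters are only meaningful when
 * pmap_stroll() returns true:
 *
 *	a_tmgr_t *a_tbl; b_tmgr_t *b_tbl; c_tmgr_t *c_tbl;
 *	mmu_short_pte_t *pte;
 *	int a_idx, b_idx, pte_idx;
 *
 *	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte,
 *	    &a_idx, &b_idx, &pte_idx) && MMU_VALID_DT(*pte))
 *		pa = MMU_PTE_PA(*pte);
 */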
1653 
1654 /* pmap_enter			INTERFACE
1655  **
1656  * Called by the kernel to map a virtual address
1657  * to a physical address in the given process map.
1658  *
1659  * Note: this function should apply an exclusive lock
1660  * on the pmap system for its duration.  (it certainly
1661  * would save my hair!!)
1662  * This function ought to be easier to read.
1663  */
1664 int
1665 pmap_enter(pmap_t pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
1666 {
1667 	bool insert, managed; /* Marks the need for PV insertion. */
1668 	u_short nidx;            /* PV list index                     */
1669 	int mapflags;            /* Flags for the mapping (see NOTE1) */
1670 	u_int a_idx, b_idx, pte_idx; /* table indices                 */
1671 	a_tmgr_t *a_tbl;         /* A: long descriptor table manager  */
1672 	b_tmgr_t *b_tbl;         /* B: short descriptor table manager */
1673 	c_tmgr_t *c_tbl;         /* C: short page table manager       */
1674 	mmu_long_dte_t *a_dte;   /* A: long descriptor table          */
1675 	mmu_short_dte_t *b_dte;  /* B: short descriptor table         */
1676 	mmu_short_pte_t *c_pte;  /* C: short page descriptor table    */
1677 	pv_t      *pv;           /* pv list head                      */
1678 	bool wired;         /* is the mapping to be wired?       */
1679 	enum {NONE, NEWA, NEWB, NEWC} llevel; /* used at end   */
1680 
1681 	if (pmap == pmap_kernel()) {
1682 		pmap_enter_kernel(va, pa, prot);
1683 		return 0;
1684 	}
1685 
1686 	/*
1687 	 * Determine if the mapping should be wired.
1688 	 */
1689 	wired = ((flags & PMAP_WIRED) != 0);
1690 
1691 	/*
1692 	 * NOTE1:
1693 	 *
1694 	 * On November 13, 1999, someone changed the pmap_enter() API such
1695 	 * that it now accepts a 'flags' argument.  This new argument
1696 	 * contains bit-flags for the architecture-independent (UVM) system to
1697 	 * use in signalling certain mapping requirements to the architecture-
1698 	 * dependent (pmap) system.  The argument it replaces, 'wired', is now
1699 	 * one of the flags within it.
1700 	 *
1701 	 * In addition to flags signaled by the architecture-independent
1702 	 * system, parts of the architecture-dependent section of the sun3x
1703 	 * kernel pass their own flags in the lower, unused bits of the
1704 	 * physical address supplied to this function.  These flags are
1705 	 * extracted and stored in the temporary variable 'mapflags'.
1706 	 *
1707 	 * Extract sun3x specific flags from the physical address.
1708 	 */
1709 	mapflags = (pa & ~MMU_PAGE_MASK);
1710 	pa &= MMU_PAGE_MASK;
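
	/*
	 * Illustrative example (hypothetical values): with 8KB pages,
	 * ANDing with MMU_PAGE_MASK clears the low 13 bits, so a caller
	 * passing pa == 0x00400000 | PMAP_NC arrives here with
	 * mapflags == PMAP_NC and pa == 0x00400000 after the two
	 * statements above.
	 */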
1711 
1712 	/*
1713 	 * Determine if the physical address being mapped is on-board RAM.
1714 	 * Any other area of the address space is likely to belong to a
1715 	 * device and hence it would be disastrous to cache its contents.
1716 	 */
1717 	if ((managed = is_managed(pa)) == false)
1718 		mapflags |= PMAP_NC;
1719 
1720 	/*
1721 	 * For user mappings we walk along the MMU tables of the given
1722 	 * pmap, reaching a PTE which describes the virtual page being
1723 	 * mapped or changed.  If any level of the walk ends in an invalid
1724 	 * entry, a table must be allocated and the entry must be updated
1725 	 * to point to it.
1726 	 * There is a bit of confusion as to whether this code must be
1727 	 * re-entrant.  For now we will assume it is.  To support
1728 	 * re-entrancy we must unlink tables from the table pool before
1729 	 * we assume we may use them.  Tables are re-linked into the pool
1730 	 * when we are finished with them at the end of the function.
1731 	 * But I don't feel like doing that until we have proof that this
1732 	 * needs to be re-entrant.
1733 	 * 'llevel' records which tables need to be relinked.
1734 	 */
1735 	llevel = NONE;
1736 
1737 	/*
1738 	 * Step 1 - Retrieve the A table from the pmap.  If it has no
1739 	 * A table, allocate a new one from the available pool.
1740 	 */
1741 
1742 	a_tbl = pmap->pm_a_tmgr;
1743 	if (a_tbl == NULL) {
1744 		/*
1745 		 * This pmap does not currently have an A table.  Allocate
1746 		 * a new one.
1747 		 */
1748 		a_tbl = get_a_table();
1749 		a_tbl->at_parent = pmap;
1750 
1751 		/*
1752 		 * Assign this new A table to the pmap, and calculate its
1753 		 * physical address so that loadcrp() can be used to make
1754 		 * the table active.
1755 		 */
1756 		pmap->pm_a_tmgr = a_tbl;
1757 		pmap->pm_a_phys = mmu_vtop(a_tbl->at_dtbl);
1758 
1759 		/*
1760 		 * If the process receiving a new A table is the current
1761 		 * process, we are responsible for setting the MMU so that
1762 		 * it becomes the current address space.  This only adds
1763 		 * new mappings, so no need to flush anything.
1764 		 */
1765 		if (pmap == current_pmap()) {
1766 			kernel_crp.rp_addr = pmap->pm_a_phys;
1767 			loadcrp(&kernel_crp);
1768 		}
1769 
1770 		if (!wired)
1771 			llevel = NEWA;
1772 	} else {
1773 		/*
1774 		 * Use the A table already allocated for this pmap.
1775 		 * Unlink it from the A table pool if necessary.
1776 		 */
1777 		if (wired && !a_tbl->at_wcnt)
1778 			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
1779 	}
1780 
1781 	/*
1782 	 * Step 2 - Walk into the B table.  If there is no valid B table,
1783 	 * allocate one.
1784 	 */
1785 
1786 	a_idx = MMU_TIA(va);            /* Calculate the TIA of the VA. */
1787 	a_dte = &a_tbl->at_dtbl[a_idx]; /* Retrieve descriptor from table */
1788 	if (MMU_VALID_DT(*a_dte)) {     /* Is the descriptor valid? */
1789 		/* The descriptor is valid.  Use the B table it points to. */
1790 		/*************************************
1791 		 *               a_idx               *
1792 		 *                 v                 *
1793 		 * a_tbl -> +-+-+-+-+-+-+-+-+-+-+-+- *
1794 		 *          | | | | | | | | | | | |  *
1795 		 *          +-+-+-+-+-+-+-+-+-+-+-+- *
1796 		 *                 |                 *
1797 		 *                 \- b_tbl -> +-+-  *
1798 		 *                             | |   *
1799 		 *                             +-+-  *
1800 		 *************************************/
1801 		b_dte = mmu_ptov(a_dte->addr.raw);
1802 		b_tbl = mmuB2tmgr(b_dte);
1803 
1804 		/*
1805 		 * If the requested mapping must be wired, but this table
1806 		 * being used to map it is not, the table must be removed
1807 		 * from the available pool and its wired entry count
1808 		 * incremented.
1809 		 */
1810 		if (wired && !b_tbl->bt_wcnt) {
1811 			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
1812 			a_tbl->at_wcnt++;
1813 		}
1814 	} else {
1815 		/* The descriptor is invalid.  Allocate a new B table. */
1816 		b_tbl = get_b_table();
1817 
1818 		/* Point the parent A table descriptor to this new B table. */
1819 		a_dte->addr.raw = mmu_vtop(b_tbl->bt_dtbl);
1820 		a_dte->attr.raw = MMU_LONG_DTE_LU | MMU_DT_SHORT;
1821 		a_tbl->at_ecnt++; /* Update parent's valid entry count */
1822 
1823 		/* Create the necessary back references to the parent table */
1824 		b_tbl->bt_parent = a_tbl;
1825 		b_tbl->bt_pidx = a_idx;
1826 
1827 		/*
1828 		 * If this table is to be wired, make sure the parent A table
1829 		 * wired count is updated to reflect that it has another wired
1830 		 * entry.
1831 		 */
1832 		if (wired)
1833 			a_tbl->at_wcnt++;
1834 		else if (llevel == NONE)
1835 			llevel = NEWB;
1836 	}
1837 
1838 	/*
1839 	 * Step 3 - Walk into the C table, if there is no valid C table,
1840 	 * allocate one.
1841 	 */
1842 
1843 	b_idx = MMU_TIB(va);            /* Calculate the TIB of the VA */
1844 	b_dte = &b_tbl->bt_dtbl[b_idx]; /* Retrieve descriptor from table */
1845 	if (MMU_VALID_DT(*b_dte)) {     /* Is the descriptor valid? */
1846 		/* The descriptor is valid.  Use the C table it points to. */
1847 		/**************************************
1848 		 *               c_idx                *
1849 		 * |                v                 *
1850 		 * \- b_tbl -> +-+-+-+-+-+-+-+-+-+-+- *
1851 		 *             | | | | | | | | | | |  *
1852 		 *             +-+-+-+-+-+-+-+-+-+-+- *
1853 		 *                  |                 *
1854 		 *                  \- c_tbl -> +-+-- *
1855 		 *                              | | | *
1856 		 *                              +-+-- *
1857 		 **************************************/
1858 		c_pte = mmu_ptov(MMU_PTE_PA(*b_dte));
1859 		c_tbl = mmuC2tmgr(c_pte);
1860 
1861 		/* If mapping is wired and table is not */
1862 		if (wired && !c_tbl->ct_wcnt) {
1863 			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
1864 			b_tbl->bt_wcnt++;
1865 		}
1866 	} else {
1867 		/* The descriptor is invalid.  Allocate a new C table. */
1868 		c_tbl = get_c_table();
1869 
1870 		/* Point the parent B table descriptor to this new C table. */
1871 		b_dte->attr.raw = mmu_vtop(c_tbl->ct_dtbl);
1872 		b_dte->attr.raw |= MMU_DT_SHORT;
1873 		b_tbl->bt_ecnt++; /* Update parent's valid entry count */
1874 
1875 		/* Create the necessary back references to the parent table */
1876 		c_tbl->ct_parent = b_tbl;
1877 		c_tbl->ct_pidx = b_idx;
1878 		/*
1879 		 * Store the pmap and base virtual managed address for faster
1880 		 * retrieval in the PV functions.
1881 		 */
1882 		c_tbl->ct_pmap = pmap;
1883 		c_tbl->ct_va = (va & (MMU_TIA_MASK|MMU_TIB_MASK));
1884 
1885 		/*
1886 		 * If this table is to be wired, make sure the parent B table
1887 		 * wired count is updated to reflect that it has another wired
1888 		 * entry.
1889 		 */
1890 		if (wired)
1891 			b_tbl->bt_wcnt++;
1892 		else if (llevel == NONE)
1893 			llevel = NEWC;
1894 	}
1895 
1896 	/*
1897 	 * Step 4 - Deposit a page descriptor (PTE) into the appropriate
1898 	 * slot of the C table, describing the PA to which the VA is mapped.
1899 	 */
1900 
1901 	pte_idx = MMU_TIC(va);
1902 	c_pte = &c_tbl->ct_dtbl[pte_idx];
1903 	if (MMU_VALID_DT(*c_pte)) { /* Is the entry currently valid? */
1904 		/*
1905 		 * The PTE is currently valid.  This particular call
1906 		 * is just a synonym for one (or more) of the following
1907 		 * operations:
1908 		 *     change protection of a page
1909 		 *     change wiring status of a page
1910 		 *     remove the mapping of a page
1911 		 */
1912 
1913 		/* First check if this is a wiring operation. */
1914 		if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED) {
1915 			/*
1916 			 * The existing mapping is wired, so adjust wired
1917 			 * entry count here. If new mapping is still wired,
1918 			 * wired entry count will be incremented again later.
1919 			 */
1920 			c_tbl->ct_wcnt--;
1921 			if (!wired) {
1922 				/*
1923 				 * The mapping of this PTE is being changed
1924 				 * from wired to unwired.
1925 				 * Adjust wired entry counts in each table and
1926 				 * set llevel flag to put unwired tables back
1927 				 * into the active pool.
1928 				 */
1929 				if (c_tbl->ct_wcnt == 0) {
1930 					llevel = NEWC;
1931 					if (--b_tbl->bt_wcnt == 0) {
1932 						llevel = NEWB;
1933 						if (--a_tbl->at_wcnt == 0) {
1934 							llevel = NEWA;
1935 						}
1936 					}
1937 				}
1938 			}
1939 		}
1940 
1941 		/* Is the new address the same as the old? */
1942 		if (MMU_PTE_PA(*c_pte) == pa) {
1943 			/*
1944 			 * Yes, mark that it does not need to be reinserted
1945 			 * into the PV list.
1946 			 */
1947 			insert = false;
1948 
1949 			/*
1950 			 * Clear all but the modified, referenced and wired
1951 			 * bits on the PTE.
1952 			 */
1953 			c_pte->attr.raw &= (MMU_SHORT_PTE_M
1954 			    | MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED);
1955 		} else {
1956 			/* No, remove the old entry */
1957 			pmap_remove_pte(c_pte);
1958 			insert = true;
1959 		}
1960 
1961 		/*
1962 		 * TLB flush is only necessary if modifying current map.
1963 		 * However, in pmap_enter(), the pmap almost always IS
1964 		 * the current pmap, so don't even bother to check.
1965 		 */
1966 		TBIS(va);
1967 	} else {
1968 		/*
1969 		 * The PTE is invalid.  Increment the valid entry count in
1970 		 * the C table manager to reflect the addition of a new entry.
1971 		 */
1972 		c_tbl->ct_ecnt++;
1973 
1974 		/* XXX - temporarily make sure the PTE is cleared. */
1975 		c_pte->attr.raw = 0;
1976 
1977 		/* It will also need to be inserted into the PV list. */
1978 		insert = true;
1979 	}
1980 
1981 	/*
1982 	 * If page is changing from unwired to wired status, set an unused bit
1983 	 * within the PTE to indicate that it is wired.  Also increment the
1984 	 * wired entry count in the C table manager.
1985 	 */
1986 	if (wired) {
1987 		c_pte->attr.raw |= MMU_SHORT_PTE_WIRED;
1988 		c_tbl->ct_wcnt++;
1989 	}
1990 
1991 	/*
1992 	 * Map the page, being careful to preserve modify/reference/wired
1993 	 * bits.  At this point it is assumed that the PTE either has no bits
1994 	 * set, or if there are set bits, they are only modified, reference or
1995 	 * wired bits.  If not, the following statement will cause erratic
1996 	 * behavior.
1997 	 */
1998 #ifdef	PMAP_DEBUG
1999 	if (c_pte->attr.raw & ~(MMU_SHORT_PTE_M |
2000 		MMU_SHORT_PTE_USED | MMU_SHORT_PTE_WIRED)) {
2001 		printf("pmap_enter: junk left in PTE at %p\n", c_pte);
2002 		Debugger();
2003 	}
2004 #endif
2005 	c_pte->attr.raw |= ((u_long) pa | MMU_DT_PAGE);
2006 
2007 	/*
2008 	 * If the mapping should be read-only, set the write protect
2009 	 * bit in the PTE.
2010 	 */
2011 	if (!(prot & VM_PROT_WRITE))
2012 		c_pte->attr.raw |= MMU_SHORT_PTE_WP;
2013 
2014 	/*
2015 	 * Mark the PTE as used and/or modified as specified by the flags arg.
2016 	 */
2017 	if (flags & VM_PROT_ALL) {
2018 		c_pte->attr.raw |= MMU_SHORT_PTE_USED;
2019 		if (flags & VM_PROT_WRITE) {
2020 			c_pte->attr.raw |= MMU_SHORT_PTE_M;
2021 		}
2022 	}
2023 
2024 	/*
2025 	 * If the mapping should be cache inhibited (indicated by flag
2026 	 * bits found in the low-order bits of the physical address),
2027 	 * mark the PTE as a cache-inhibited page.
2028 	 */
2029 	if (mapflags & PMAP_NC)
2030 		c_pte->attr.raw |= MMU_SHORT_PTE_CI;
2031 
2032 	/*
2033 	 * If the physical address being mapped is managed by the PV
2034 	 * system then link the pte into the list of pages mapped to that
2035 	 * address.
2036 	 */
2037 	if (insert && managed) {
2038 		pv = pa2pv(pa);
2039 		nidx = pteidx(c_pte);
2040 
2041 		pvebase[nidx].pve_next = pv->pv_idx;
2042 		pv->pv_idx = nidx;
2043 	}
2044 
2045 	/* Move any allocated or unwired tables back into the active pool. */
2046 
2047 	switch (llevel) {
2048 		case NEWA:
2049 			TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2050 			/* FALLTHROUGH */
2051 		case NEWB:
2052 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2053 			/* FALLTHROUGH */
2054 		case NEWC:
2055 			TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2056 			/* FALLTHROUGH */
2057 		default:
2058 			break;
2059 	}
2060 
2061 	return 0;
2062 }
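
/*
 * Usage sketch (hypothetical caller): the VM system would enter a
 * wired, writable mapping roughly as follows; both the access type and
 * PMAP_WIRED travel in 'flags':
 *
 *	error = pmap_enter(pmap, va, pa,
 *	    VM_PROT_READ | VM_PROT_WRITE,
 *	    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
 *	pmap_update(pmap);
 */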
2063 
2064 /* pmap_enter_kernel			INTERNAL
2065  **
2066  * Map the given virtual address to the given physical address within the
2067  * kernel address space.  This function exists because the kernel map does
2068  * not do dynamic table allocation.  It consists of a contiguous array of ptes
2069  * and can be edited directly without the need to walk through any tables.
2070  *
2071  * XXX: "Danger, Will Robinson!"
2072  * Note that the kernel should never take a fault on any page
2073  * between [ KERNBASE .. virtual_avail ] and this is checked in
2074  * trap.c for kernel-mode MMU faults.  This means that mappings
2075  * created in that range must be implicitly wired. -gwr
2076  */
2077 void
2078 pmap_enter_kernel(vaddr_t va, paddr_t pa, vm_prot_t prot)
2079 {
2080 	bool       was_valid, insert;
2081 	u_short         pte_idx;
2082 	int             flags;
2083 	mmu_short_pte_t *pte;
2084 	pv_t            *pv;
2085 	paddr_t     old_pa;
2086 
2087 	flags = (pa & ~MMU_PAGE_MASK);
2088 	pa &= MMU_PAGE_MASK;
2089 
2090 	if (is_managed(pa))
2091 		insert = true;
2092 	else
2093 		insert = false;
2094 
2095 	/*
2096 	 * Calculate the index of the PTE being modified.
2097 	 */
2098 	pte_idx = (u_long)m68k_btop(va - KERNBASE);
2099 
2100 	/* This array is traditionally named "Sysmap" */
2101 	pte = &kernCbase[pte_idx];
2102 
2103 	if (MMU_VALID_DT(*pte)) {
2104 		was_valid = true;
2105 		/*
2106 		 * If the PTE already maps a different
2107 		 * physical address, unmap and pv_unlink.
2108 		 */
2109 		old_pa = MMU_PTE_PA(*pte);
2110 		if (pa != old_pa)
2111 			pmap_remove_pte(pte);
2112 		else {
2113 		    /*
2114 		     * Old PA and new PA are the same.  No need to
2115 		     * relink the mapping within the PV list.
2116 		     */
2117 		     insert = false;
2118 
2119 		    /*
2120 		     * Preserve only the mod/ref bits of the PTE.
2121 		     */
2122 		    pte->attr.raw &= (MMU_SHORT_PTE_USED|MMU_SHORT_PTE_M);
2123 		}
2124 	} else {
2125 		pte->attr.raw = MMU_DT_INVALID;
2126 		was_valid = false;
2127 	}
2128 
2129 	/*
2130 	 * Map the page.  Being careful to preserve modified/referenced bits
2131 	 * on the PTE.
2132 	 */
2133 	pte->attr.raw |= (pa | MMU_DT_PAGE);
2134 
2135 	if (!(prot & VM_PROT_WRITE)) /* If access should be read-only */
2136 		pte->attr.raw |= MMU_SHORT_PTE_WP;
2137 	if (flags & PMAP_NC)
2138 		pte->attr.raw |= MMU_SHORT_PTE_CI;
2139 	if (was_valid)
2140 		TBIS(va);
2141 
2142 	/*
2143 	 * Insert the PTE into the PV system, if need be.
2144 	 */
2145 	if (insert) {
2146 		pv = pa2pv(pa);
2147 		pvebase[pte_idx].pve_next = pv->pv_idx;
2148 		pv->pv_idx = pte_idx;
2149 	}
2150 }
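
/*
 * Indexing sketch: because the kernel map is one flat PTE array, the
 * PTE for any kernel va is found by arithmetic instead of a table
 * walk, exactly as done above:
 *
 *	pte_idx = m68k_btop(va - KERNBASE);	page number within map
 *	pte = &kernCbase[pte_idx];
 */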
2151 
2152 void
2153 pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot)
2154 {
2155 	mmu_short_pte_t	*pte;
2156 
2157 	/* This array is traditionally named "Sysmap" */
2158 	pte = &kernCbase[(u_long)m68k_btop(va - KERNBASE)];
2159 
2160 	KASSERT(!MMU_VALID_DT(*pte));
2161 	pte->attr.raw = MMU_DT_INVALID | MMU_DT_PAGE | (pa & MMU_PAGE_MASK);
2162 	if (!(prot & VM_PROT_WRITE))
2163 		pte->attr.raw |= MMU_SHORT_PTE_WP;
2164 }
2165 
2166 void
2167 pmap_kremove(vaddr_t va, vsize_t len)
2168 {
2169 	int idx, eidx;
2170 
2171 #ifdef	PMAP_DEBUG
2172 	if ((va & PGOFSET) || (len & PGOFSET))
2173 		panic("pmap_kremove: alignment");
2174 #endif
2175 
2176 	idx  = m68k_btop(va - KERNBASE);
2177 	eidx = m68k_btop(va + len - KERNBASE);
2178 
2179 	while (idx < eidx) {
2180 		kernCbase[idx++].attr.raw = MMU_DT_INVALID;
2181 		TBIS(va);
2182 		va += PAGE_SIZE;
2183 	}
2184 }
2185 
2186 /* pmap_map			INTERNAL
2187  **
2188  * Map a contiguous range of physical memory into a contiguous range of
2189  * the kernel virtual address space.
2190  *
2191  * Used for device mappings and early mapping of the kernel text/data/bss.
2192  * Returns the first virtual address beyond the end of the range.
2193  */
2194 vaddr_t
2195 pmap_map(vaddr_t va, paddr_t pa, paddr_t endpa, int prot)
2196 {
2197 	int sz;
2198 
2199 	sz = endpa - pa;
2200 	do {
2201 		pmap_enter_kernel(va, pa, prot);
2202 		va += PAGE_SIZE;
2203 		pa += PAGE_SIZE;
2204 		sz -= PAGE_SIZE;
2205 	} while (sz > 0);
2206 	pmap_update(pmap_kernel());
2207 	return va;
2208 }
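
/*
 * Usage sketch (hypothetical device mapping): a single page of device
 * registers could be mapped uncached by encoding PMAP_NC in the low
 * bits of the PA, which pmap_enter_kernel() extracts (see NOTE1 in
 * pmap_enter() above):
 *
 *	va = pmap_map(va, devpa | PMAP_NC, devpa + PAGE_SIZE,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 */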
2209 
2210 /* pmap_protect_kernel			INTERNAL
2211  **
2212  * Apply the given protection code to a kernel address range.
2213  */
2214 static INLINE void
2215 pmap_protect_kernel(vaddr_t startva, vaddr_t endva, vm_prot_t prot)
2216 {
2217 	vaddr_t va;
2218 	mmu_short_pte_t *pte;
2219 
2220 	pte = &kernCbase[(unsigned long) m68k_btop(startva - KERNBASE)];
2221 	for (va = startva; va < endva; va += PAGE_SIZE, pte++) {
2222 		if (MMU_VALID_DT(*pte)) {
2223 		    switch (prot) {
2224 		        case VM_PROT_ALL:
2225 		            break;
2226 		        case VM_PROT_EXECUTE:
2227 		        case VM_PROT_READ:
2228 		        case VM_PROT_READ|VM_PROT_EXECUTE:
2229 		            pte->attr.raw |= MMU_SHORT_PTE_WP;
2230 		            break;
2231 		        case VM_PROT_NONE:
2232 		            /* this is an alias for 'pmap_remove_kernel' */
2233 		            pmap_remove_pte(pte);
2234 		            break;
2235 		        default:
2236 		            break;
2237 		    }
2238 		    /*
2239 		     * since this is the kernel, immediately flush any cached
2240 		     * descriptors for this address.
2241 		     */
2242 		    TBIS(va);
2243 		}
2244 	}
2245 }
2246 
2247 /* pmap_protect			INTERFACE
2248  **
2249  * Apply the given protection to the given virtual address range within
2250  * the given map.
2251  *
2252  * It is ok for the protection applied to be stronger than what is
2253  * specified.  We use this to our advantage when the given map has no
2254  * mapping for the virtual address.  By skipping a page when this
2255  * is discovered, we are effectively applying a protection of VM_PROT_NONE,
2256  * and therefore do not need to map the page just to apply a protection
2257  * code.  Only pmap_enter() needs to create new mappings if they do not exist.
2258  *
2259  * XXX - This function could be sped up by using pmap_stroll() for initial
2260  *       setup, and then manual strolling in the for() loop.
2261  */
2262 void
2263 pmap_protect(pmap_t pmap, vaddr_t startva, vaddr_t endva, vm_prot_t prot)
2264 {
2265 	bool iscurpmap;
2266 	int a_idx, b_idx, c_idx;
2267 	a_tmgr_t *a_tbl;
2268 	b_tmgr_t *b_tbl;
2269 	c_tmgr_t *c_tbl;
2270 	mmu_short_pte_t *pte;
2271 
2272 	if (pmap == pmap_kernel()) {
2273 		pmap_protect_kernel(startva, endva, prot);
2274 		return;
2275 	}
2276 
2277 	/*
2278 	 * In this particular pmap implementation, there are only three
2279 	 * types of memory protection: 'all' (read/write/execute),
2280 	 * 'read-only' (read/execute) and 'none' (no mapping.)
2281 	 * It is not possible for us to treat 'executable' as a separate
2282 	 * protection type.  Therefore, protection requests that seek to
2283 	 * remove execute permission while retaining read or write, and those
2284 	 * that make little sense (write-only for example) are ignored.
2285 	 */
2286 	switch (prot) {
2287 		case VM_PROT_NONE:
2288 			/*
2289 			 * A request to apply the protection code of
2290 			 * 'VM_PROT_NONE' is a synonym for pmap_remove().
2291 			 */
2292 			pmap_remove(pmap, startva, endva);
2293 			return;
2294 		case	VM_PROT_EXECUTE:
2295 		case	VM_PROT_READ:
2296 		case	VM_PROT_READ|VM_PROT_EXECUTE:
2297 			/* continue */
2298 			break;
2299 		case	VM_PROT_WRITE:
2300 		case	VM_PROT_WRITE|VM_PROT_READ:
2301 		case	VM_PROT_WRITE|VM_PROT_EXECUTE:
2302 		case	VM_PROT_ALL:
2303 			/* None of these should happen in a sane system. */
2304 			return;
2305 	}
2306 
2307 	/*
2308 	 * If the pmap has no A table, it has no mappings and therefore
2309 	 * there is nothing to protect.
2310 	 */
2311 	if ((a_tbl = pmap->pm_a_tmgr) == NULL)
2312 		return;
2313 
2314 	a_idx = MMU_TIA(startva);
2315 	b_idx = MMU_TIB(startva);
2316 	c_idx = MMU_TIC(startva);
2317 	b_tbl = NULL;
2318 	c_tbl = NULL;
2319 
2320 	iscurpmap = (pmap == current_pmap());
2321 	while (startva < endva) {
2322 		if (b_tbl || MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
2323 		  if (b_tbl == NULL) {
2324 		    b_tbl = (b_tmgr_t *) a_tbl->at_dtbl[a_idx].addr.raw;
2325 		    b_tbl = mmu_ptov((vaddr_t)b_tbl);
2326 		    b_tbl = mmuB2tmgr((mmu_short_dte_t *)b_tbl);
2327 		  }
2328 		  if (c_tbl || MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
2329 		    if (c_tbl == NULL) {
2330 		      c_tbl = (c_tmgr_t *) MMU_DTE_PA(b_tbl->bt_dtbl[b_idx]);
2331 		      c_tbl = mmu_ptov((vaddr_t)c_tbl);
2332 		      c_tbl = mmuC2tmgr((mmu_short_pte_t *)c_tbl);
2333 		    }
2334 		    if (MMU_VALID_DT(c_tbl->ct_dtbl[c_idx])) {
2335 		      pte = &c_tbl->ct_dtbl[c_idx];
2336 		      /* make the mapping read-only */
2337 		      pte->attr.raw |= MMU_SHORT_PTE_WP;
2338 		      /*
2339 		       * If we just modified the current address space,
2340 		       * flush any translations for the modified page from
2341 		       * the translation cache and any data from it in the
2342 		       * data cache.
2343 		       */
2344 		      if (iscurpmap)
2345 		          TBIS(startva);
2346 		    }
2347 		    startva += PAGE_SIZE;
2348 
2349 		    if (++c_idx >= MMU_C_TBL_SIZE) { /* exceeded C table? */
2350 		      c_tbl = NULL;
2351 		      c_idx = 0;
2352 		      if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2353 		        b_tbl = NULL;
2354 		        b_idx = 0;
2355 		      }
2356 		    }
2357 		  } else { /* C table wasn't valid */
2358 		    c_tbl = NULL;
2359 		    c_idx = 0;
2360 		    startva += MMU_TIB_RANGE;
2361 		    if (++b_idx >= MMU_B_TBL_SIZE) { /* exceeded B table? */
2362 		      b_tbl = NULL;
2363 		      b_idx = 0;
2364 		    }
2365 		  } /* C table */
2366 		} else { /* B table wasn't valid */
2367 		  b_tbl = NULL;
2368 		  b_idx = 0;
2369 		  startva += MMU_TIA_RANGE;
2370 		  a_idx++;
2371 		} /* B table */
2372 	}
2373 }
2374 
2375 /* pmap_unwire				INTERFACE
2376  **
2377  * Clear the wired attribute of the specified page.
2378  *
2379  * This function is called from vm_fault.c to unwire
2380  * a mapping.
2381  */
2382 void
2383 pmap_unwire(pmap_t pmap, vaddr_t va)
2384 {
2385 	int a_idx, b_idx, c_idx;
2386 	a_tmgr_t *a_tbl;
2387 	b_tmgr_t *b_tbl;
2388 	c_tmgr_t *c_tbl;
2389 	mmu_short_pte_t *pte;
2390 
2391 	/* Kernel mappings always remain wired. */
2392 	if (pmap == pmap_kernel())
2393 		return;
2394 
2395 	/*
2396 	 * Walk through the tables.  If the walk terminates without
2397 	 * a valid PTE then the address wasn't wired in the first place.
2398 	 * Return immediately.
2399 	 */
2400 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl, &pte, &a_idx,
2401 		&b_idx, &c_idx) == false)
2402 		return;
2403 
2404 
2405 	/* Is the PTE wired?  If not, return. */
2406 	if (!(pte->attr.raw & MMU_SHORT_PTE_WIRED))
2407 		return;
2408 
2409 	/* Remove the wiring bit. */
2410 	pte->attr.raw &= ~(MMU_SHORT_PTE_WIRED);
2411 
2412 	/*
2413 	 * Decrement the wired entry count in the C table.
2414 	 * If it reaches zero the following things happen:
2415 	 * 1. The table no longer has any wired entries and is considered
2416 	 *    unwired.
2417 	 * 2. It is placed on the available queue.
2418 	 * 3. The parent table's wired entry count is decremented.
2419 	 * 4. If it reaches zero, this process repeats at step 1 and
2420  *    stops after reaching the A table.
2421 	 */
2422 	if (--c_tbl->ct_wcnt == 0) {
2423 		TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
2424 		if (--b_tbl->bt_wcnt == 0) {
2425 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
2426 			if (--a_tbl->at_wcnt == 0) {
2427 				TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
2428 			}
2429 		}
2430 	}
2431 }
2432 
2433 /* pmap_copy				INTERFACE
2434  **
2435  * Copy the mappings of a range of addresses in one pmap, into
2436  * the destination address of another.
2437  *
2438  * This routine is advisory.  Should we one day decide that MMU tables
2439  * may be shared by more than one pmap, this function should be used to
2440  * link them together.  Until that day however, we do nothing.
2441  */
2442 void
2443 pmap_copy(pmap_t pmap_a, pmap_t pmap_b, vaddr_t dst, vsize_t len, vaddr_t src)
2444 {
2445 
2446 	/* not implemented. */
2447 }
2448 
2449 /* pmap_copy_page			INTERFACE
2450  **
2451  * Copy the contents of one physical page into another.
2452  *
2453  * This function makes use of two virtual pages allocated in pmap_bootstrap()
2454  * to map the two specified physical pages into the kernel address space.
2455  *
2456  * Note: We could use the transparent translation registers to make the
2457  * mappings.  If we do so, be sure to disable interrupts before using them.
2458  */
2459 void
2460 pmap_copy_page(paddr_t srcpa, paddr_t dstpa)
2461 {
2462 	vaddr_t srcva, dstva;
2463 	int s;
2464 
2465 	srcva = tmp_vpages[0];
2466 	dstva = tmp_vpages[1];
2467 
2468 	s = splvm();
2469 #ifdef DIAGNOSTIC
2470 	if (tmp_vpages_inuse++)
2471 		panic("pmap_copy_page: temporary vpages are in use.");
2472 #endif
2473 
2474 	/* Map pages as non-cacheable to avoid cache pollution? */
2475 	pmap_kenter_pa(srcva, srcpa, VM_PROT_READ);
2476 	pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE);
2477 
2478 	/* Hand-optimized version of bcopy(src, dst, PAGE_SIZE) */
2479 	copypage((char *)srcva, (char *)dstva);
2480 
2481 	pmap_kremove(srcva, PAGE_SIZE);
2482 	pmap_kremove(dstva, PAGE_SIZE);
2483 
2484 #ifdef DIAGNOSTIC
2485 	--tmp_vpages_inuse;
2486 #endif
2487 	splx(s);
2488 }
2489 
2490 /* pmap_zero_page			INTERFACE
2491  **
2492  * Zero the contents of the specified physical page.
2493  *
2494  * Uses one of the virtual pages allocated in pmap_bootstrap()
2495  * to map the specified page into the kernel address space.
2496  */
2497 void
2498 pmap_zero_page(paddr_t dstpa)
2499 {
2500 	vaddr_t dstva;
2501 	int s;
2502 
2503 	dstva = tmp_vpages[1];
2504 	s = splvm();
2505 #ifdef DIAGNOSTIC
2506 	if (tmp_vpages_inuse++)
2507 		panic("pmap_zero_page: temporary vpages are in use.");
2508 #endif
2509 
2510 	/* The comments in pmap_copy_page() above apply here also. */
2511 	pmap_kenter_pa(dstva, dstpa, VM_PROT_READ | VM_PROT_WRITE);
2512 
2513 	/* Hand-optimized version of bzero(ptr, PAGE_SIZE) */
2514 	zeropage((char *)dstva);
2515 
2516 	pmap_kremove(dstva, PAGE_SIZE);
2517 #ifdef DIAGNOSTIC
2518 	--tmp_vpages_inuse;
2519 #endif
2520 	splx(s);
2521 }
2522 
2523 /* pmap_collect			INTERFACE
2524  **
2525  * Called from the VM system when we are about to swap out
2526  * the process using this pmap.  This should give up any
2527  * resources held here, including all its MMU tables.
2528  */
2529 void
2530 pmap_collect(pmap_t pmap)
2531 {
2532 
2533 	/* XXX - todo... */
2534 }
2535 
2536 /* pmap_pinit			INTERNAL
2537  **
2538  * Initialize a pmap structure.
2539  */
2540 static INLINE void
2541 pmap_pinit(pmap_t pmap)
2542 {
2543 
2544 	memset(pmap, 0, sizeof(struct pmap));
2545 	pmap->pm_a_tmgr = NULL;
2546 	pmap->pm_a_phys = kernAphys;
2547 	pmap->pm_refcount = 1;
2548 	simple_lock_init(&pmap->pm_lock);
2549 }
2550 
2551 /* pmap_create			INTERFACE
2552  **
2553  * Create and return a pmap structure.
2554  */
2555 pmap_t
2556 pmap_create(void)
2557 {
2558 	pmap_t	pmap;
2559 
2560 	pmap = pool_get(&pmap_pmap_pool, PR_WAITOK);
2561 	pmap_pinit(pmap);
2562 	return pmap;
2563 }
2564 
2565 /* pmap_release				INTERNAL
2566  **
2567  * Release any resources held by the given pmap.
2568  *
2569  * This is the reverse analog to pmap_pinit.  It does not
2570  * necessarily imply that the pmap structure is deallocated,
2571  * as pmap_destroy does.
2572  */
2573 static INLINE void
2574 pmap_release(pmap_t pmap)
2575 {
2576 
2577 	/*
2578 	 * As long as the pmap contains no mappings,
2579 	 * which always should be the case whenever
2580 	 * this function is called, there really should
2581 	 * be nothing to do.
2582 	 */
2583 #ifdef	PMAP_DEBUG
2584 	if (pmap == pmap_kernel())
2585 		panic("pmap_release: kernel pmap");
2586 #endif
2587 	/*
2588 	 * XXX - If this pmap has an A table, give it back.
2589 	 * The pmap SHOULD be empty by now, and pmap_remove
2590 	 * should have already given back the A table...
2591 	 * However, I see:  pmap->pm_a_tmgr->at_ecnt == 1
2592 	 * at this point, which means some mapping was not
2593 	 * removed when it should have been. -gwr
2594 	 */
2595 	if (pmap->pm_a_tmgr != NULL) {
2596 		/* First make sure we are not using it! */
2597 		if (kernel_crp.rp_addr == pmap->pm_a_phys) {
2598 			kernel_crp.rp_addr = kernAphys;
2599 			loadcrp(&kernel_crp);
2600 		}
2601 #ifdef	PMAP_DEBUG /* XXX - todo! */
2602 		/* XXX - Now complain... */
2603 		printf("pmap_release: still have table\n");
2604 		Debugger();
2605 #endif
2606 		free_a_table(pmap->pm_a_tmgr, true);
2607 		pmap->pm_a_tmgr = NULL;
2608 		pmap->pm_a_phys = kernAphys;
2609 	}
2610 }
2611 
2612 /* pmap_reference			INTERFACE
2613  **
2614  * Increment the reference count of a pmap.
2615  */
2616 void
2617 pmap_reference(pmap_t pmap)
2618 {
2619 	pmap_lock(pmap);
2620 	pmap_add_ref(pmap);
2621 	pmap_unlock(pmap);
2622 }
2623 
2624 /* pmap_dereference			INTERNAL
2625  **
2626  * Decrease the reference count on the given pmap
2627  * by one and return the current count.
2628  */
2629 static INLINE int
2630 pmap_dereference(pmap_t pmap)
2631 {
2632 	int rtn;
2633 
2634 	pmap_lock(pmap);
2635 	rtn = pmap_del_ref(pmap);
2636 	pmap_unlock(pmap);
2637 
2638 	return rtn;
2639 }
2640 
2641 /* pmap_destroy			INTERFACE
2642  **
2643  * Decrement a pmap's reference count and delete
2644  * the pmap if it becomes zero.  Will be called
2645  * only after all mappings have been removed.
2646  */
2647 void
2648 pmap_destroy(pmap_t pmap)
2649 {
2650 
2651 	if (pmap_dereference(pmap) == 0) {
2652 		pmap_release(pmap);
2653 		pool_put(&pmap_pmap_pool, pmap);
2654 	}
2655 }
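
/*
 * Lifecycle sketch (hypothetical caller): pmaps are reference counted,
 * so pmap_destroy() releases the resources and frees the structure
 * only when the last reference is dropped:
 *
 *	pmap_t pm = pmap_create();	pm_refcount == 1
 *	pmap_reference(pm);		pm_refcount == 2
 *	pmap_destroy(pm);		pm_refcount == 1, pm still valid
 *	pmap_destroy(pm);		pm_refcount == 0, pm released
 */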
2656 
2657 /* pmap_is_referenced			INTERFACE
2658  **
2659  * Determine if the given physical page has been
2660  * referenced (read from [or written to.])
2661  */
2662 bool
2663 pmap_is_referenced(struct vm_page *pg)
2664 {
2665 	paddr_t   pa = VM_PAGE_TO_PHYS(pg);
2666 	pv_t      *pv;
2667 	int       idx;
2668 
2669 	/*
2670 	 * Check the flags on the pv head.  If they are set,
2671 	 * return immediately.  Otherwise a search must be done.
2672 	 */
2673 
2674 	pv = pa2pv(pa);
2675 	if (pv->pv_flags & PV_FLAGS_USED)
2676 		return true;
2677 
2678 	/*
2679 	 * Search through all pv elements pointing
2680 	 * to this page and query their reference bits
2681 	 */
2682 
2683 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2684 		if (MMU_PTE_USED(kernCbase[idx])) {
2685 			return true;
2686 		}
2687 	}
2688 	return false;
2689 }
2690 
2691 /* pmap_is_modified			INTERFACE
2692  **
2693  * Determine if the given physical page has been
2694  * modified (written to.)
2695  */
2696 bool
2697 pmap_is_modified(struct vm_page *pg)
2698 {
2699 	paddr_t   pa = VM_PAGE_TO_PHYS(pg);
2700 	pv_t      *pv;
2701 	int       idx;
2702 
2703 	/* see comments in pmap_is_referenced() */
2704 	pv = pa2pv(pa);
2705 	if (pv->pv_flags & PV_FLAGS_MDFY)
2706 		return true;
2707 
2708 	for (idx = pv->pv_idx;
2709 		 idx != PVE_EOL;
2710 		 idx = pvebase[idx].pve_next) {
2711 
2712 		if (MMU_PTE_MODIFIED(kernCbase[idx])) {
2713 			return true;
2714 		}
2715 	}
2716 
2717 	return false;
2718 }
2719 
2720 /* pmap_page_protect			INTERFACE
2721  **
2722  * Applies the given protection to all mappings to the given
2723  * physical page.
2724  */
2725 void
2726 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
2727 {
2728 	paddr_t   pa = VM_PAGE_TO_PHYS(pg);
2729 	pv_t      *pv;
2730 	int       idx;
2731 	vaddr_t va;
2732 	struct mmu_short_pte_struct *pte;
2733 	c_tmgr_t  *c_tbl;
2734 	pmap_t    pmap, curpmap;
2735 
2736 	curpmap = current_pmap();
2737 	pv = pa2pv(pa);
2738 
2739 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2740 		pte = &kernCbase[idx];
2741 		switch (prot) {
2742 			case VM_PROT_ALL:
2743 				/* do nothing */
2744 				break;
2745 			case VM_PROT_EXECUTE:
2746 			case VM_PROT_READ:
2747 			case VM_PROT_READ|VM_PROT_EXECUTE:
2748 				/*
2749 				 * Determine the virtual address mapped by
2750 				 * the PTE and flush ATC entries if necessary.
2751 				 */
2752 				va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2753 				pte->attr.raw |= MMU_SHORT_PTE_WP;
2754 				if (pmap == curpmap || pmap == pmap_kernel())
2755 					TBIS(va);
2756 				break;
2757 			case VM_PROT_NONE:
2758 				/* Save the mod/ref bits. */
2759 				pv->pv_flags |= pte->attr.raw;
2760 				/* Invalidate the PTE. */
2761 				pte->attr.raw = MMU_DT_INVALID;
2762 
2763 				/*
2764 				 * Update table counts.  And flush ATC entries
2765 				 * if necessary.
2766 				 */
2767 				va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2768 
2769 				/*
2770 				 * If the PTE belongs to the kernel map,
2771 				 * be sure to flush the page it maps.
2772 				 */
2773 				if (pmap == pmap_kernel()) {
2774 					TBIS(va);
2775 				} else {
2776 					/*
2777 					 * The PTE belongs to a user map.
2778 					 * update the entry count in the C
2779 					 * table to which it belongs and flush
2780 					 * the ATC if the mapping belongs to
2781 					 * the current pmap.
2782 					 */
2783 					c_tbl->ct_ecnt--;
2784 					if (pmap == curpmap)
2785 						TBIS(va);
2786 				}
2787 				break;
2788 			default:
2789 				break;
2790 		}
2791 	}
2792 
2793 	/*
2794 	 * If the protection code indicates that all mappings to the page
2795 	 * be removed, truncate the PV list to zero entries.
2796 	 */
2797 	if (prot == VM_PROT_NONE)
2798 		pv->pv_idx = PVE_EOL;
2799 }
2800 
2801 /* pmap_get_pteinfo		INTERNAL
2802  **
2803  * Called internally to find the pmap and virtual address within that
2804  * map to which the pte at the given index maps.  Also includes the PTE's C
2805  * table manager.
2806  *
2807  * Returns the pmap and the C table manager through the argument
2808  * pointers, and the virtual address as the return value.
2809  */
2810 vaddr_t
2811 pmap_get_pteinfo(u_int idx, pmap_t *pmap, c_tmgr_t **tbl)
2812 {
2813 	vaddr_t     va = 0;
2814 
2815 	/*
2816 	 * Determine if the PTE is a kernel PTE or a user PTE.
2817 	 */
2818 	if (idx >= NUM_KERN_PTES) {
2819 		/*
2820 		 * The PTE belongs to a user mapping.
2821 		 */
2822 		/* XXX: Would like an inline for this to validate idx... */
2823 		*tbl = &Ctmgrbase[(idx - NUM_KERN_PTES) / MMU_C_TBL_SIZE];
2824 
2825 		*pmap = (*tbl)->ct_pmap;
2826 		/*
2827 		 * To find the va to which the PTE maps, we first take
2828 		 * the table's base virtual address mapping which is stored
2829 		 * in ct_va.  We then increment this address by a page for
2830 		 * every slot skipped until we reach the PTE.
2831 		 */
2832 		va = (*tbl)->ct_va;
2833 		va += m68k_ptob(idx % MMU_C_TBL_SIZE);
2834 	} else {
2835 		/*
2836 		 * The PTE belongs to the kernel map.
2837 		 */
2838 		*pmap = pmap_kernel();
2839 
2840 		va = m68k_ptob(idx);
2841 		va += KERNBASE;
2842 	}
2843 
2844 	return va;
2845 }
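
/*
 * Worked example (hypothetical index values): with 8KB pages and
 * MMU_C_TBL_SIZE == 64, a kernel index such as idx == 10 yields
 *
 *	va = KERNBASE + m68k_ptob(10)
 *
 * while a user index idx >= NUM_KERN_PTES selects the C table manager
 * Ctmgrbase[(idx - NUM_KERN_PTES) / 64] and yields
 *
 *	va = ct_va + m68k_ptob(idx % 64)
 */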
2846 
2847 /* pmap_clear_modify			INTERFACE
2848  **
2849  * Clear the modification bit on the page at the specified
2850  * physical address.
2851  *
2852  */
2853 bool
2854 pmap_clear_modify(struct vm_page *pg)
2855 {
2856 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2857 	bool rv;
2858 
2859 	rv = pmap_is_modified(pg);
2860 	pmap_clear_pv(pa, PV_FLAGS_MDFY);
2861 	return rv;
2862 }
2863 
2864 /* pmap_clear_reference			INTERFACE
2865  **
2866  * Clear the referenced bit on the page at the specified
2867  * physical address.
2868  */
2869 bool
2870 pmap_clear_reference(struct vm_page *pg)
2871 {
2872 	paddr_t pa = VM_PAGE_TO_PHYS(pg);
2873 	bool rv;
2874 
2875 	rv = pmap_is_referenced(pg);
2876 	pmap_clear_pv(pa, PV_FLAGS_USED);
2877 	return rv;
2878 }
2879 
2880 /* pmap_clear_pv			INTERNAL
2881  **
2882  * Clears the specified flag from the specified physical address.
2883  * (Used by pmap_clear_modify() and pmap_clear_reference().)
2884  *
2885  * Flag is one of:
2886  *   PV_FLAGS_MDFY - Page modified bit.
2887  *   PV_FLAGS_USED - Page used (referenced) bit.
2888  *
2889  * This routine must not only clear the flag on the pv list
2890  * head.  It must also clear the bit on every pte in the pv
2891  * list associated with the address.
2892  */
2893 void
2894 pmap_clear_pv(paddr_t pa, int flag)
2895 {
2896 	pv_t      *pv;
2897 	int       idx;
2898 	vaddr_t   va;
2899 	pmap_t          pmap;
2900 	mmu_short_pte_t *pte;
2901 	c_tmgr_t        *c_tbl;
2902 
2903 	pv = pa2pv(pa);
2904 	pv->pv_flags &= ~(flag);
2905 	for (idx = pv->pv_idx; idx != PVE_EOL; idx = pvebase[idx].pve_next) {
2906 		pte = &kernCbase[idx];
2907 		pte->attr.raw &= ~(flag);
2908 
2909 		/*
2910 		 * The MC68030 MMU will not set the modified or
2911 		 * referenced bits on any MMU tables for which it has
2912 		 * a cached descriptor with its modify bit set.  To ensure
2913 		 * that it will modify these bits on the PTE during the next
2914 		 * time it is written to or read from, we must flush it from
2915 		 * the ATC.
2916 		 *
2917 		 * Ordinarily it is only necessary to flush the descriptor
2918 		 * if it is used in the current address space.  But since I
2919 		 * am not sure that there will always be a notion of
2920 		 * 'the current address space' when this function is called,
2921 		 * I will skip the test and always flush the address.  It
2922 		 * does no harm.
2923 		 */
2924 
2925 		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
2926 		TBIS(va);
2927 	}
2928 }
2929 
2930 /* pmap_extract_kernel		INTERNAL
2931  **
2932  * Extract a translation from the kernel address space.
2933  */
2934 static INLINE bool
2935 pmap_extract_kernel(vaddr_t va, paddr_t *pap)
2936 {
2937 	mmu_short_pte_t *pte;
2938 
2939 	pte = &kernCbase[(u_int)m68k_btop(va - KERNBASE)];
2940 	if (!MMU_VALID_DT(*pte))
2941 		return false;
2942 	if (pap != NULL)
2943 		*pap = MMU_PTE_PA(*pte);
2944 	return true;
2945 }
2946 
2947 /* pmap_extract			INTERFACE
2948  **
2949  * Return the physical address mapped by the virtual address
2950  * in the specified pmap.
2951  *
2952  * Note: this function should also apply an exclusive lock
2953  * on the pmap system for its duration.
2954  */
2955 bool
2956 pmap_extract(pmap_t pmap, vaddr_t va, paddr_t *pap)
2957 {
2958 	int a_idx, b_idx, pte_idx;
2959 	a_tmgr_t	*a_tbl;
2960 	b_tmgr_t	*b_tbl;
2961 	c_tmgr_t	*c_tbl;
2962 	mmu_short_pte_t	*c_pte;
2963 
2964 	if (pmap == pmap_kernel())
2965 		return pmap_extract_kernel(va, pap);
2966 
2967 	if (pmap_stroll(pmap, va, &a_tbl, &b_tbl, &c_tbl,
2968 		&c_pte, &a_idx, &b_idx, &pte_idx) == false)
2969 		return false;
2970 
2971 	if (!MMU_VALID_DT(*c_pte))
2972 		return false;
2973 
2974 	if (pap != NULL)
2975 		*pap = MMU_PTE_PA(*c_pte);
2976 	return true;
2977 }
2978 
2979 /* pmap_remove_kernel		INTERNAL
2980  **
2981  * Remove the mapping of a range of virtual addresses from the kernel map.
2982  * The arguments are already page-aligned.
2983  */
2984 static INLINE void
2985 pmap_remove_kernel(vaddr_t sva, vaddr_t eva)
2986 {
2987 	int idx, eidx;
2988 
2989 #ifdef	PMAP_DEBUG
2990 	if ((sva & PGOFSET) || (eva & PGOFSET))
2991 		panic("pmap_remove_kernel: alignment");
2992 #endif
2993 
2994 	idx  = m68k_btop(sva - KERNBASE);
2995 	eidx = m68k_btop(eva - KERNBASE);
2996 
2997 	while (idx < eidx) {
2998 		pmap_remove_pte(&kernCbase[idx++]);
2999 		TBIS(sva);
3000 		sva += PAGE_SIZE;
3001 	}
3002 }
3003 
3004 /* pmap_remove			INTERFACE
3005  **
3006  * Remove the mapping of a range of virtual addresses from the given pmap.
3007  *
3008  */
3009 void
3010 pmap_remove(pmap_t pmap, vaddr_t sva, vaddr_t eva)
3011 {
3012 
3013 	if (pmap == pmap_kernel()) {
3014 		pmap_remove_kernel(sva, eva);
3015 		return;
3016 	}
3017 
3018 	/*
3019 	 * If the pmap doesn't have an A table of its own, it has no mappings
3020 	 * that can be removed.
3021 	 */
3022 	if (pmap->pm_a_tmgr == NULL)
3023 		return;
3024 
3025 	/*
3026 	 * Remove the specified range from the pmap.  If the function
3027 	 * returns true, the operation removed all the valid mappings
3028 	 * in the pmap and freed its A table.  If this happened to the
3029 	 * currently loaded pmap, the MMU root pointer must be reloaded
3030 	 * with the default 'kernel' map.
3031 	 */
3032 	if (pmap_remove_a(pmap->pm_a_tmgr, sva, eva)) {
3033 		if (kernel_crp.rp_addr == pmap->pm_a_phys) {
3034 			kernel_crp.rp_addr = kernAphys;
3035 			loadcrp(&kernel_crp);
3036 			/* will do TLB flush below */
3037 		}
3038 		pmap->pm_a_tmgr = NULL;
3039 		pmap->pm_a_phys = kernAphys;
3040 	}
3041 
3042 	/*
3043 	 * If we just modified the current address space,
3044 	 * make sure to flush the MMU cache.
3045 	 *
3046 	 * XXX - this could be an unnecessarily large flush.
3047 	 * XXX - Could decide, based on the size of the VA range
3048 	 * to be removed, whether to flush "by pages" or "all".
3049 	 */
3050 	if (pmap == current_pmap())
3051 		TBIAU();
3052 }
3053 
3054 /* pmap_remove_a			INTERNAL
3055  **
3056  * This is function number one in a set of three that removes a range
3057  * of memory in the most efficient manner by removing the highest possible
3058  * tables from the memory space.  This particular function attempts to remove
3059  * as many B tables as it can, delegating the remaining fragmented ranges to
3060  * pmap_remove_b().
3061  *
3062  * If the removal operation results in an empty A table, the function returns
3063  * true.
3064  *
3065  * It's ugly but will do for now.
3066  */
3067 bool
3068 pmap_remove_a(a_tmgr_t *a_tbl, vaddr_t sva, vaddr_t eva)
3069 {
3070 	bool empty;
3071 	int idx;
3072 	vaddr_t nstart, nend;
3073 	b_tmgr_t *b_tbl;
3074 	mmu_long_dte_t  *a_dte;
3075 	mmu_short_dte_t *b_dte;
3076 	uint8_t at_wired, bt_wired;
3077 
3078 	/*
3079 	 * The following code works with what I call a 'granularity
3080 	 * reduction algorithm'.  A range of addresses will always have
3081 	 * the following properties, which are classified according to
3082 	 * how the range relates to the size of the current granularity
3083 	 * - an A table entry:
3084 	 *
3085 	 *            1 2       3 4
3086 	 * -+---+---+---+---+---+---+---+-
3087 	 * -+---+---+---+---+---+---+---+-
3088 	 *
3089 	 * A range will always start on a granularity boundary, illustrated
3090 	 * by '+' signs in the table above, or it will start at some point
3091 	 * in between granularity boundaries, as illustrated by point 1.
3092 	 * The first step in removing a range of addresses is to remove the
3093 	 * range between 1 and 2, the nearest granularity boundary.  This
3094 	 * job is handled by the section of code governed by the
3095 	 * 'if (start < nstart)' statement.
3096 	 *
3097 	 * A range will always encompass zero or more integral granules,
3098 	 * illustrated by points 2 and 3.  Integral granules are easy to
3099 	 * remove.  The removal of these granules is the second step, and
3100 	 * is handled by the code block 'if (nstart < nend)'.
3101 	 *
3102 	 * Lastly, a range will always end on a granularity boundary,
3103 	 * illustrated by point 3, or it will fall just beyond one, as at
3104 	 * point 4.  The last step involves removing this range and is handled by
3105 	 * the code block 'if (nend < end)'.
3106 	 */
3107 	nstart = MMU_ROUND_UP_A(sva);
3108 	nend = MMU_ROUND_A(eva);
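
	/*
	 * Worked example (hypothetical range): each A table entry spans
	 * MMU_TIA_RANGE == 32MB (a 7-bit A index over a 32-bit VA).
	 * For sva == 0x00300000 and eva == 0x04500000:
	 *
	 *	nstart = MMU_ROUND_UP_A(sva) == 0x02000000
	 *	nend   = MMU_ROUND_A(eva)    == 0x04000000
	 *
	 * so [sva, nstart) is delegated to pmap_remove_b(), [nstart,
	 * nend) frees one whole B table, and [nend, eva) is delegated
	 * to pmap_remove_b() again.
	 */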
3109 
3110 	at_wired = a_tbl->at_wcnt;
3111 
3112 	if (sva < nstart) {
3113 		/*
3114 		 * This block is executed if the range starts between
3115 		 * a granularity boundary.
3116 		 *
3117 		 * First find the DTE which is responsible for mapping
3118 		 * the start of the range.
3119 		 */
3120 		idx = MMU_TIA(sva);
3121 		a_dte = &a_tbl->at_dtbl[idx];
3122 
3123 		/*
3124 		 * If the DTE is valid then delegate the removal of the sub
3125 		 * range to pmap_remove_b(), which can remove addresses at
3126 		 * a finer granularity.
3127 		 */
3128 		if (MMU_VALID_DT(*a_dte)) {
3129 			b_dte = mmu_ptov(a_dte->addr.raw);
3130 			b_tbl = mmuB2tmgr(b_dte);
3131 			bt_wired = b_tbl->bt_wcnt;
3132 
3133 			/*
3134 			 * The sub range to be removed starts at the start
3135 			 * of the full range we were asked to remove, and ends
3136 			 * at the lesser of:
3137 			 * 1. The end of the full range, -or-
3138 			 * 2. The start of the full range, rounded up to the
3139 			 *    nearest granularity boundary.
3140 			 */
3141 			if (eva < nstart)
3142 				empty = pmap_remove_b(b_tbl, sva, eva);
3143 			else
3144 				empty = pmap_remove_b(b_tbl, sva, nstart);
3145 
3146 			/*
3147 			 * If the child table no longer has wired entries,
3148 			 * decrement wired entry count.
3149 			 */
3150 			if (bt_wired && b_tbl->bt_wcnt == 0)
3151 				a_tbl->at_wcnt--;
3152 
3153 			/*
3154 			 * If the removal resulted in an empty B table,
3155 			 * invalidate the DTE that points to it and decrement
3156 			 * the valid entry count of the A table.
3157 			 */
3158 			if (empty) {
3159 				a_dte->attr.raw = MMU_DT_INVALID;
3160 				a_tbl->at_ecnt--;
3161 			}
3162 		}
3163 		/*
3164 		 * If the DTE is invalid, the address range is already non-
3165 		 * existent and can simply be skipped.
3166 		 */
3167 	}
3168 	if (nstart < nend) {
3169 		/*
3170 		 * This block is executed if the range spans a whole number
3171 		 * multiple of granules (A table entries.)
3172 		 *
3173 		 * First find the DTE which is responsible for mapping
3174 		 * the start of the first granule involved.
3175 		 */
3176 		idx = MMU_TIA(nstart);
3177 		a_dte = &a_tbl->at_dtbl[idx];
3178 
3179 		/*
3180 		 * Remove entire sub-granules (B tables) one at a time,
3181 		 * until reaching the end of the range.
3182 		 */
3183 		for (; nstart < nend; a_dte++, nstart += MMU_TIA_RANGE)
3184 			if (MMU_VALID_DT(*a_dte)) {
3185 				/*
3186 				 * Find the B table manager for the
3187 				 * entry and free it.
3188 				 */
3189 				b_dte = mmu_ptov(a_dte->addr.raw);
3190 				b_tbl = mmuB2tmgr(b_dte);
3191 				bt_wired = b_tbl->bt_wcnt;
3192 
3193 				free_b_table(b_tbl, true);
3194 
3195 				/*
3196 				 * All child entries have been removed.
3197 				 * If there were any wired entries in it,
3198 				 * decrement wired entry count.
3199 				 */
3200 				if (bt_wired)
3201 					a_tbl->at_wcnt--;
3202 
3203 				/*
3204 				 * Invalidate the DTE that points to the
3205 				 * B table and decrement the valid entry
3206 				 * count of the A table.
3207 				 */
3208 				a_dte->attr.raw = MMU_DT_INVALID;
3209 				a_tbl->at_ecnt--;
3210 			}
3211 	}
3212 	if (nend < eva) {
3213 		/*
3214 		 * This block is executed if the range ends beyond a
3215 		 * granularity boundary.
3216 		 *
3217 		 * First find the DTE which is responsible for mapping
3218 		 * the start of the nearest (rounded down) granularity
3219 		 * boundary.
3220 		 */
3221 		idx = MMU_TIA(nend);
3222 		a_dte = &a_tbl->at_dtbl[idx];
3223 
3224 		/*
3225 		 * If the DTE is valid then delegate the removal of the sub
3226 		 * range to pmap_remove_b(), which can remove addresses at
3227 		 * a finer granularity.
3228 		 */
3229 		if (MMU_VALID_DT(*a_dte)) {
3230 			/*
3231 			 * Find the B table manager for the entry
3232 			 * and hand it to pmap_remove_b() along with
3233 			 * the sub range.
3234 			 */
3235 			b_dte = mmu_ptov(a_dte->addr.raw);
3236 			b_tbl = mmuB2tmgr(b_dte);
3237 			bt_wired = b_tbl->bt_wcnt;
3238 
3239 			empty = pmap_remove_b(b_tbl, nend, eva);
3240 
3241 			/*
3242 			 * If the child table no longer has wired entries,
3243 			 * decrement wired entry count.
3244 			 */
3245 			if (bt_wired && b_tbl->bt_wcnt == 0)
3246 				a_tbl->at_wcnt--;
3247 			/*
3248 			 * If the removal resulted in an empty B table,
3249 			 * invalidate the DTE that points to it and decrement
3250 			 * the valid entry count of the A table.
3251 			 */
3252 			if (empty) {
3253 				a_dte->attr.raw = MMU_DT_INVALID;
3254 				a_tbl->at_ecnt--;
3255 			}
3256 		}
3257 	}
3258 
3259 	/*
3260 	 * If there are no more entries in the A table, release it
3261 	 * back to the available pool and return true.
3262 	 */
3263 	if (a_tbl->at_ecnt == 0) {
3264 		KASSERT(a_tbl->at_wcnt == 0);
3265 		a_tbl->at_parent = NULL;
3266 		if (!at_wired)
3267 			TAILQ_REMOVE(&a_pool, a_tbl, at_link);
3268 		TAILQ_INSERT_HEAD(&a_pool, a_tbl, at_link);
3269 		empty = true;
3270 	} else {
3271 		/*
3272 		 * If the table doesn't have wired entries any longer
3273 		 * but still has unwired entries, put it back into
3274 		 * the available queue.
3275 		 */
3276 		if (at_wired && a_tbl->at_wcnt == 0)
3277 			TAILQ_INSERT_TAIL(&a_pool, a_tbl, at_link);
3278 		empty = false;
3279 	}
3280 
3281 	return empty;
3282 }
3283 
3284 /* pmap_remove_b			INTERNAL
3285  **
3286  * Remove a range of addresses from an address space, trying to remove entire
3287  * C tables if possible.
3288  *
3289  * If the operation results in an empty B table, the function returns true.
3290  */
3291 bool
3292 pmap_remove_b(b_tmgr_t *b_tbl, vaddr_t sva, vaddr_t eva)
3293 {
3294 	bool empty;
3295 	int idx;
3296 	vaddr_t nstart, nend, rstart;
3297 	c_tmgr_t *c_tbl;
3298 	mmu_short_dte_t  *b_dte;
3299 	mmu_short_pte_t  *c_dte;
3300 	uint8_t bt_wired, ct_wired;
3301 
3302 	nstart = MMU_ROUND_UP_B(sva);
3303 	nend = MMU_ROUND_B(eva);
3304 
3305 	bt_wired = b_tbl->bt_wcnt;
3306 
3307 	if (sva < nstart) {
3308 		idx = MMU_TIB(sva);
3309 		b_dte = &b_tbl->bt_dtbl[idx];
3310 		if (MMU_VALID_DT(*b_dte)) {
3311 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3312 			c_tbl = mmuC2tmgr(c_dte);
3313 			ct_wired = c_tbl->ct_wcnt;
3314 
3315 			if (eva < nstart)
3316 				empty = pmap_remove_c(c_tbl, sva, eva);
3317 			else
3318 				empty = pmap_remove_c(c_tbl, sva, nstart);
3319 
3320 			/*
3321 			 * If the child table no longer has wired entries,
3322 			 * decrement wired entry count.
3323 			 */
3324 			if (ct_wired && c_tbl->ct_wcnt == 0)
3325 				b_tbl->bt_wcnt--;
3326 
3327 			if (empty) {
3328 				b_dte->attr.raw = MMU_DT_INVALID;
3329 				b_tbl->bt_ecnt--;
3330 			}
3331 		}
3332 	}
3333 	if (nstart < nend) {
3334 		idx = MMU_TIB(nstart);
3335 		b_dte = &b_tbl->bt_dtbl[idx];
3336 		rstart = nstart;
3337 		while (rstart < nend) {
3338 			if (MMU_VALID_DT(*b_dte)) {
3339 				c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3340 				c_tbl = mmuC2tmgr(c_dte);
3341 				ct_wired = c_tbl->ct_wcnt;
3342 
3343 				free_c_table(c_tbl, true);
3344 
3345 				/*
3346 				 * All child entries have been removed.
3347 				 * If there were any wired entries in it,
3348 				 * decrement wired entry count.
3349 				 */
3350 				if (ct_wired)
3351 					b_tbl->bt_wcnt--;
3352 
3353 				b_dte->attr.raw = MMU_DT_INVALID;
3354 				b_tbl->bt_ecnt--;
3355 			}
3356 			b_dte++;
3357 			rstart += MMU_TIB_RANGE;
3358 		}
3359 	}
3360 	if (nend < eva) {
3361 		idx = MMU_TIB(nend);
3362 		b_dte = &b_tbl->bt_dtbl[idx];
3363 		if (MMU_VALID_DT(*b_dte)) {
3364 			c_dte = mmu_ptov(MMU_DTE_PA(*b_dte));
3365 			c_tbl = mmuC2tmgr(c_dte);
3366 			ct_wired = c_tbl->ct_wcnt;
3367 			empty = pmap_remove_c(c_tbl, nend, eva);
3368 
3369 			/*
3370 			 * If the child table no longer has wired entries,
3371 			 * decrement wired entry count.
3372 			 */
3373 			if (ct_wired && c_tbl->ct_wcnt == 0)
3374 				b_tbl->bt_wcnt--;
3375 
3376 			if (empty) {
3377 				b_dte->attr.raw = MMU_DT_INVALID;
3378 				b_tbl->bt_ecnt--;
3379 			}
3380 		}
3381 	}
3382 
3383 	if (b_tbl->bt_ecnt == 0) {
3384 		KASSERT(b_tbl->bt_wcnt == 0);
3385 		b_tbl->bt_parent = NULL;
3386 		if (!bt_wired)
3387 			TAILQ_REMOVE(&b_pool, b_tbl, bt_link);
3388 		TAILQ_INSERT_HEAD(&b_pool, b_tbl, bt_link);
3389 		empty = true;
3390 	} else {
3391 		/*
3392 	 * If the table no longer has any wired entries but
3393 	 * still has valid unwired entries, put it back on
3394 		 * the available queue.
3395 		 */
3396 		if (bt_wired && b_tbl->bt_wcnt == 0)
3397 			TAILQ_INSERT_TAIL(&b_pool, b_tbl, bt_link);
3398 
3399 		empty = false;
3400 	}
3401 
3402 	return empty;
3403 }
3404 
3405 /* pmap_remove_c			INTERNAL
3406  **
3407  * Remove a range of addresses from the given C table.
3408  */
3409 bool
3410 pmap_remove_c(c_tmgr_t *c_tbl, vaddr_t sva, vaddr_t eva)
3411 {
3412 	bool empty;
3413 	int idx;
3414 	mmu_short_pte_t *c_pte;
3415 	uint8_t ct_wired;
3416 
3417 	ct_wired = c_tbl->ct_wcnt;
3418 
3419 	idx = MMU_TIC(sva);
3420 	c_pte = &c_tbl->ct_dtbl[idx];
3421 	for (; sva < eva; sva += MMU_PAGE_SIZE, c_pte++) {
3422 		if (MMU_VALID_DT(*c_pte)) {
3423 			if (c_pte->attr.raw & MMU_SHORT_PTE_WIRED)
3424 				c_tbl->ct_wcnt--;
3425 			pmap_remove_pte(c_pte);
3426 			c_tbl->ct_ecnt--;
3427 		}
3428 	}
3429 
3430 	if (c_tbl->ct_ecnt == 0) {
3431 		KASSERT(c_tbl->ct_wcnt == 0);
3432 		c_tbl->ct_parent = NULL;
3433 		if (!ct_wired)
3434 			TAILQ_REMOVE(&c_pool, c_tbl, ct_link);
3435 		TAILQ_INSERT_HEAD(&c_pool, c_tbl, ct_link);
3436 		empty = true;
3437 	} else {
3438 		/*
3439 	 * If the table no longer has any wired entries but
3440 	 * still has valid unwired entries, put it back on
3441 		 * the available queue.
3442 		 */
3443 		if (ct_wired && c_tbl->ct_wcnt == 0)
3444 			TAILQ_INSERT_TAIL(&c_pool, c_tbl, ct_link);
3445 		empty = false;
3446 	}
3447 
3448 	return empty;
3449 }
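/*
 * Editorial note: as the calls in pmap_remove_b() above show, this
 * routine handles partial ranges within a single C table; a range that
 * covers a whole C table is released through free_c_table() instead.
 * The return value reports whether the C table became empty and was
 * returned to c_pool, e.g. (names as used in pmap_remove_b()):
 *
 *	empty = pmap_remove_c(c_tbl, sva, nstart);
 */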
3450 
3451 /* pmap_bootstrap_alloc			INTERNAL
3452  **
3453  * Used internally for memory allocation at startup when malloc is not
3454  * available.  This code will fail once it crosses the first memory
3455  * bank boundary on the 3/80.  Hopefully, by then, the VM system
3456  * will be in charge of allocation.
3457  */
3458 void *
3459 pmap_bootstrap_alloc(int size)
3460 {
3461 	void *rtn;
3462 
3463 #ifdef	PMAP_DEBUG
3464 	if (bootstrap_alloc_enabled == false) {
3465 		mon_printf("pmap_bootstrap_alloc: disabled\n");
3466 		sunmon_abort();
3467 	}
3468 #endif
3469 
3470 	rtn = (void *) virtual_avail;
3471 	virtual_avail += size;
3472 
3473 #ifdef	PMAP_DEBUG
3474 	if (virtual_avail > virtual_contig_end) {
3475 		mon_printf("pmap_bootstrap_alloc: out of mem\n");
3476 		sunmon_abort();
3477 	}
3478 #endif
3479 
3480 	return rtn;
3481 }
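/*
 * Editorial sketch of a typical bootstrap-time call (the cast and size
 * here are illustrative assumptions; real callers live in
 * pmap_bootstrap()):
 *
 *	kernAbase = pmap_bootstrap_alloc(sizeof(mmu_long_dte_t)
 *	    * MMU_A_TBL_SIZE);
 *
 * Note that allocations only advance virtual_avail; nothing handed out
 * here can ever be freed.
 */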
3482 
3483 /* pmap_bootstrap_aalign			INTERNAL
3484  **
3485  * Used to ensure that the next call to pmap_bootstrap_alloc() will
3486  * return a chunk of memory aligned to the specified size.
3487  *
3488  * Note: This function only supports alignment sizes that are powers
3489  * of two.
3490  */
3491 void
3492 pmap_bootstrap_aalign(int size)
3493 {
3494 	int off;
3495 
3496 	off = virtual_avail & (size - 1);
3497 	if (off) {
3498 		(void)pmap_bootstrap_alloc(size - off);
3499 	}
3500 }
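/*
 * Editorial worked example: with virtual_avail == 0x12345, a call of
 * pmap_bootstrap_aalign(0x1000) computes off = 0x345 and consumes
 * 0x1000 - 0x345 = 0xcbb filler bytes, so the next call to
 * pmap_bootstrap_alloc() returns the aligned address 0x13000.
 */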
3501 
3502 /* pmap_pa_exists
3503  **
3504  * Used by the /dev/mem driver to see if a given PA is memory
3505  * that can be mapped.  (The PA is not in a hole.)
3506  */
3507 int
3508 pmap_pa_exists(paddr_t pa)
3509 {
3510 	int i;
3511 
3512 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3513 		if ((pa >= avail_mem[i].pmem_start) &&
3514 			(pa <  avail_mem[i].pmem_end))
3515 			return 1;
3516 		if (avail_mem[i].pmem_next == NULL)
3517 			break;
3518 	}
3519 	return 0;
3520 }
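/*
 * Editorial sketch of the intended /dev/mem-style check (hypothetical
 * caller, not code from this tree):
 *
 *	if (!pmap_pa_exists(pa))
 *		return EFAULT;	-- PA lies in a hole between RAM segments
 */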
3521 
3522 /* Called only from locore.s and pmap.c */
3523 void	_pmap_switch(pmap_t pmap);
3524 
3525 /*
3526  * _pmap_switch			INTERNAL
3527  *
3528  * This is called by locore.s:cpu_switch() when it is
3529  * switching to a new process.  Load new translations.
3530  * Note: done in-line by locore.s unless PMAP_DEBUG
3531  *
3532  * Note that we do NOT allocate a context here, but
3533  * share the "kernel only" context until we really
3534  * need our own context for user-space mappings in
3535  * pmap_enter_user().  [ s/context/mmu A table/ ]
3536  */
3537 void
3538 _pmap_switch(pmap_t pmap)
3539 {
3540 	u_long rootpa;
3541 
3542 	/*
3543 	 * Only do reload/flush if we have to.
3544 	 * Note that if the old and new process
3545 	 * were BOTH using the "null" context,
3546 	 * then this will NOT flush the TLB.
3547 	 */
3548 	rootpa = pmap->pm_a_phys;
3549 	if (kernel_crp.rp_addr != rootpa) {
3550 		DPRINT(("pmap_activate(%p)\n", pmap));
3551 		kernel_crp.rp_addr = rootpa;
3552 		loadcrp(&kernel_crp);
3553 		TBIAU();
3554 	}
3555 }
3556 
3557 /*
3558  * Exported version of pmap_activate().  This is called from the
3559  * machine-independent VM code when a process is given a new pmap.
3560  * If (l->l_proc == curproc) do what cpu_switch() would do; otherwise just
3561  * take this as notification that the process has a new pmap.
3562  */
3563 void
3564 pmap_activate(struct lwp *l)
3565 {
3566 
3567 	if (l->l_proc == curproc) {
3568 		_pmap_switch(l->l_proc->p_vmspace->vm_map.pmap);
3569 	}
3570 }
3571 
3572 /*
3573  * pmap_deactivate			INTERFACE
3574  **
3575  * This is called to deactivate the specified process's address space.
3576  */
3577 void
3578 pmap_deactivate(struct lwp *l)
3579 {
3580 
3581 	/* Nothing to do. */
3582 }
3583 
3584 /*
3585  * Fill in the sun3x-specific part of the kernel core header
3586  * for dumpsys().  (See machdep.c for the rest.)
3587  */
3588 void
3589 pmap_kcore_hdr(struct sun3x_kcore_hdr *sh)
3590 {
3591 	u_long spa, len;
3592 	int i;
3593 
3594 	sh->pg_frame = MMU_SHORT_PTE_BASEADDR;
3595 	sh->pg_valid = MMU_DT_PAGE;
3596 	sh->contig_end = virtual_contig_end;
3597 	sh->kernCbase = (u_long)kernCbase;
3598 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3599 		spa = avail_mem[i].pmem_start;
3600 		spa = m68k_trunc_page(spa);
3601 		len = avail_mem[i].pmem_end - spa;
3602 		len = m68k_round_page(len);
3603 		sh->ram_segs[i].start = spa;
3604 		sh->ram_segs[i].size  = len;
3605 	}
3606 }
3607 
3608 
3609 /* pmap_virtual_space			INTERFACE
3610  **
3611  * Return the currently available range of virtual addresses in the
3612  * arguments provided.  Only really called once.
3613  */
3614 void
3615 pmap_virtual_space(vaddr_t *vstart, vaddr_t *vend)
3616 {
3617 
3618 	*vstart = virtual_avail;
3619 	*vend = virtual_end;
3620 }
3621 
3622 /*
3623  * Provide memory to the VM system.
3624  *
3625  * Assume avail_start is always in the
3626  * first segment as pmap_bootstrap does.
3627  */
3628 static void
3629 pmap_page_upload(void)
3630 {
3631 	paddr_t	a, b;	/* memory range */
3632 	int i;
3633 
3634 	/* Supply the memory in segments. */
3635 	for (i = 0; i < SUN3X_NPHYS_RAM_SEGS; i++) {
3636 		a = atop(avail_mem[i].pmem_start);
3637 		b = atop(avail_mem[i].pmem_end);
3638 		if (i == 0)
3639 			a = atop(avail_start);
3640 		if (avail_mem[i].pmem_end > avail_end)
3641 			b = atop(avail_end);
3642 
3643 		uvm_page_physload(a, b, a, b, VM_FREELIST_DEFAULT);
3644 
3645 		if (avail_mem[i].pmem_next == NULL)
3646 			break;
3647 	}
3648 }
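/*
 * Editorial worked example (illustrative numbers): if the first RAM
 * segment is [0, 0x600000) and bootstrap allocations pushed avail_start
 * to 0x3f0000, the first iteration above reduces to
 *
 *	uvm_page_physload(atop(0x3f0000), atop(0x600000),
 *	    atop(0x3f0000), atop(0x600000), VM_FREELIST_DEFAULT);
 *
 * so only pages above the bootstrap area are handed to UVM.
 */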
3649 
3650 /* pmap_count			INTERFACE
3651  **
3652  * Return the number of resident (valid) pages in the given pmap.
3653  *
3654  * Note:  If this function is handed the kernel map, it will report
3655  * that it has no mappings.  Hopefully the VM system won't ask for kernel
3656  * map statistics.
3657  */
3658 segsz_t
3659 pmap_count(pmap_t pmap, int type)
3660 {
3661 	u_int     count;
3662 	int       a_idx, b_idx;
3663 	a_tmgr_t *a_tbl;
3664 	b_tmgr_t *b_tbl;
3665 	c_tmgr_t *c_tbl;
3666 
3667 	/*
3668 	 * If the pmap does not have its own A table manager, it has no
3669 	 * valid entries.
3670 	 */
3671 	if (pmap->pm_a_tmgr == NULL)
3672 		return 0;
3673 
3674 	a_tbl = pmap->pm_a_tmgr;
3675 
3676 	count = 0;
3677 	for (a_idx = 0; a_idx < MMU_TIA(KERNBASE); a_idx++) {
3678 	    if (MMU_VALID_DT(a_tbl->at_dtbl[a_idx])) {
3679 	        b_tbl = mmuB2tmgr(mmu_ptov(a_tbl->at_dtbl[a_idx].addr.raw));
3680 	        for (b_idx = 0; b_idx < MMU_B_TBL_SIZE; b_idx++) {
3681 	            if (MMU_VALID_DT(b_tbl->bt_dtbl[b_idx])) {
3682 	                c_tbl = mmuC2tmgr(
3683 	                    mmu_ptov(MMU_DTE_PA(b_tbl->bt_dtbl[b_idx])));
3684 	                if (type == 0)
3685 	                    /*
3686 	                     * A resident entry count has been requested.
3687 	                     */
3688 	                    count += c_tbl->ct_ecnt;
3689 	                else
3690 	                    /*
3691 	                     * A wired entry count has been requested.
3692 	                     */
3693 	                    count += c_tbl->ct_wcnt;
3694 	            }
3695 	        }
3696 	    }
3697 	}
3698 
3699 	return count;
3700 }
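/*
 * Editorial note: the 'type' argument selects between the two counters
 * kept in each C table manager; presumably this is reached via macros
 * along the lines of the following (an assumption, see the
 * machine-dependent pmap header):
 *
 *	resident = pmap_count(pmap, 0);		-- valid mappings
 *	wired	 = pmap_count(pmap, 1);		-- wired mappings
 */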
3701 
3702 /************************ SUN3 COMPATIBILITY ROUTINES ********************
3703  * The following routines are only used by DDB for tricky kernel text    *
3704  * operations in db_memrw.c.  They are provided for sun3                 *
3705  * compatibility.                                                        *
3706  *************************************************************************/
3707 /* get_pte			INTERNAL
3708  **
3709  * Return the page descriptor that describes the kernel mapping
3710  * of the given virtual address.
3711  */
3712 extern u_long ptest_addr(u_long);	/* XXX: locore.s */
3713 u_int
3714 get_pte(vaddr_t va)
3715 {
3716 	u_long pte_pa;
3717 	mmu_short_pte_t *pte;
3718 
3719 	/* Get the physical address of the PTE */
3720 	pte_pa = ptest_addr(va & ~PGOFSET);
3721 
3722 	/* Convert to a virtual address... */
3723 	pte = (mmu_short_pte_t *) (KERNBASE + pte_pa);
3724 
3725 	/* Make sure it is in our level-C tables... */
3726 	if ((pte < kernCbase) ||
3727 		(pte >= &mmuCbase[NUM_USER_PTES]))
3728 		return 0;
3729 
3730 	/* ... and just return its contents. */
3731 	return (pte->attr.raw);
3732 }
3733 
3734 
3735 /* set_pte			INTERNAL
3736  **
3737  * Set the page descriptor that describes the kernel mapping
3738  * of the given virtual address.
3739  */
3740 void
3741 set_pte(vaddr_t va, u_int pte)
3742 {
3743 	u_long idx;
3744 
3745 	if (va < KERNBASE)
3746 		return;
3747 
3748 	idx = (unsigned long) m68k_btop(va - KERNBASE);
3749 	kernCbase[idx].attr.raw = pte;
3750 	TBIS(va);
3751 }
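/*
 * Editorial sketch of the db_memrw.c-style sequence these two routines
 * support (which protection bit gets toggled is an assumption here):
 *
 *	u_int oldpte = get_pte(va);	-- save the descriptor
 *	set_pte(va, writable_pte);	-- e.g. clear write protection
 *	... patch the kernel text ...
 *	set_pte(va, oldpte);		-- restore the original mapping
 */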
3752 
3753 /*
3754  *	Routine:        pmap_procwr
3755  *
3756  *	Function:
3757  *		Synchronize caches corresponding to [addr, addr+len) in p.
3758  */
3759 void
3760 pmap_procwr(struct proc *p, vaddr_t va, size_t len)
3761 {
3762 
3763 	(void)cachectl1(0x80000004, va, len, p);
3764 }
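/*
 * Editorial note: assuming the m68k CC_* flags from <m68k/sysarch.h>
 * (CC_IPURGE == 4, CC_EXTPURGE == 0x80000000), the magic constant
 * above is equivalent to
 *
 *	(void)cachectl1(CC_EXTPURGE | CC_IPURGE, va, len, p);
 *
 * i.e. purge the on- and off-chip instruction caches for the range.
 */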
3765 
3766 
3767 #ifdef	PMAP_DEBUG
3768 /************************** DEBUGGING ROUTINES **************************
3769  * The following routines are meant to be an aid to debugging the pmap  *
3770  * system.  They are callable from the DDB command line and should be   *
3771  * prepared to be handed unstable or incomplete states of the system.   *
3772  ************************************************************************/
3773 
3774 /* pv_list
3775  **
3776  * List all pages found on the pv list for the given physical page.
3777  * To avoid endless loops, the listing will stop at the end of the list
3778  * or after 'n' entries - whichever comes first.
3779  */
3780 void
3781 pv_list(paddr_t pa, int n)
3782 {
3783 	int  idx;
3784 	vaddr_t va;
3785 	pv_t *pv;
3786 	c_tmgr_t *c_tbl;
3787 	pmap_t pmap;
3788 
3789 	pv = pa2pv(pa);
3790 	idx = pv->pv_idx;
3791 	for (; idx != PVE_EOL && n > 0; idx = pvebase[idx].pve_next, n--) {
3792 		va = pmap_get_pteinfo(idx, &pmap, &c_tbl);
3793 		printf("idx %d, pmap 0x%x, va 0x%x, c_tbl %x\n",
3794 			idx, (u_int) pmap, (u_int) va, (u_int) c_tbl);
3795 	}
3796 }
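/*
 * Editorial usage sketch: from the DDB prompt this would typically be
 * invoked as something like (hypothetical physical address)
 *
 *	call pv_list(0x3f0000, 10)
 *
 * to print at most ten of the mappings of that physical page.
 */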
3797 #endif	/* PMAP_DEBUG */
3798 
3799 #ifdef NOT_YET
3800 /* and maybe not ever */
3801 /************************** LOW-LEVEL ROUTINES **************************
3802  * These routines will eventually be re-written into assembly and placed*
3803  * in locore.s.  They are here now as stubs so that the pmap module can *
3804  * be linked as a standalone user program for testing.                  *
3805  ************************************************************************/
3806 /* flush_atc_crp			INTERNAL
3807  **
3808  * Flush all page descriptors derived from the given CPU Root Pointer
3809  * (CRP), or 'A' table as it is known here, from the 68851's automatic
3810  * cache.
3811  */
3812 void
3813 flush_atc_crp(int a_tbl)
3814 {
3815 	mmu_long_rp_t rp;
3816 
3817 	/* Create a temporary root table pointer that points to the
3818 	 * given A table.
3819 	 */
3820 	rp.attr.raw = ~MMU_LONG_RP_LU;
3821 	rp.addr.raw = (unsigned int) a_tbl;
3822 
3823 	mmu_pflushr(&rp);
3824 	/* mmu_pflushr:
3825 	 * 	movel   sp(4)@,a0
3826 	 * 	pflushr a0@
3827 	 *	rts
3828 	 */
3829 }
3830 #endif /* NOT_YET */
3831