/*
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)pmap.c	7.1 (Berkeley) 01/07/92
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidation or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and as to when physical maps must be made correct.
 */

#include "param.h"
#include "proc.h"
#include "malloc.h"
#include "user.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"

#include "../include/machConst.h"
#include "pte.h"

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 * XXX really should do this as a part of the higher level code.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	int		pv_flags;	/* flags */
} *pv_entry_t;

pv_entry_t	pv_table;	/* array of entries, one per page */
extern void	pmap_remove_pv();

#define pa_index(pa)		atop((pa) - first_phys_addr)
#define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
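
/*
 * Illustrative sketch, not part of the original source: visiting every
 * virtual mapping of a managed physical page via the macros above.  An
 * empty list is denoted by a header entry whose pv_pmap is NULL.  The
 * function name is hypothetical.
 */
#ifdef notdef
void
pv_walk_example(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;

	pv = pa_to_pvh(pa);		/* header entry inside pv_table */
	if (pv->pv_pmap == NULL)
		return;			/* page has no mappings */
	for (; pv; pv = pv->pv_next)
		printf("va %x in pmap %x\n", pv->pv_va, pv->pv_pmap);
}
#endif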

#ifdef DEBUG
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
	int cachehit;	/* new entry forced valid entry out */
} enter_stats;
struct {
	int calls;
	int removes;
	int flushes;
	int pidflushes;	/* HW pid stolen */
	int pvfirst;
	int pvsearch;
} remove_stats;

int pmapdebug;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_CACHE	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_TLBPID	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

#endif /* DEBUG */

u_int	whichpids[2] = {	/* bit mask of hardware PIDs in use */
	3, 0
};
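
/*
 * Sketch, not in the original: the whichpids[] encoding.  Hardware PID
 * i is busy iff bit (i & 0x1F) of whichpids[i >> 5] is set; the
 * initializer 3 marks PIDs 0 and 1 busy from the start (PID 1 is
 * preallocated for proc0 in mach_init(), per pmap_pinit below).
 * These macro names are hypothetical conveniences.
 */
#ifdef notdef
#define PID_BUSY(id)	(whichpids[(id) >> 5] & (1 << ((id) & 0x1F)))
#define PID_MARK(id)	(whichpids[(id) >> 5] |= 1 << ((id) & 0x1F))
#define PID_CLEAR(id)	(whichpids[(id) >> 5] &= ~(1 << ((id) & 0x1F)))
#endif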

struct pmap	kernel_pmap_store;
pmap_t		kernel_pmap;
pmap_t		cur_pmap;	/* current pmap mapped in hardware */

vm_offset_t    	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
int		pmaxpagesperpage;	/* PAGE_SIZE / NBPG */
#ifdef ATTR
char		*pmap_attributes;	/* reference and modify bits */
#endif
pmap_hash_t	zero_pmap_hash;		/* empty TLB hash table for init */

/*
 *	Bootstrap the system enough to run with virtual memory.
 */
void
pmap_bootstrap(firstaddr)
	vm_offset_t firstaddr;
{
	register int i;
	vm_offset_t start = firstaddr;
	extern int maxmem, physmem;

	/*
	 * Allocate a TLB hash table for the kernel.
	 * This could be a KSEG0 address and thus save TLB entries, but
	 * it's faster and simpler in assembly language to have a
	 * fixed address that can be accessed with a 16-bit signed offset.
	 * Note: the kernel pm_hash field is null; user pm_hash fields are
	 * either the table or zero_pmap_hash.
	 */
	kernel_pmap_store.pm_hash = (pmap_hash_t)0;
	for (i = 0; i < PMAP_HASH_KPAGES; i++) {
		MachTLBWriteIndexed(i + UPAGES + PMAP_HASH_UPAGES,
			PMAP_HASH_KADDR + (i << PGSHIFT),
			firstaddr | PG_V | PG_M | PG_G);
		firstaddr += NBPG;
	}

	/*
	 * Allocate an empty TLB hash table for initial pmaps.
	 */
	zero_pmap_hash = (pmap_hash_t)firstaddr;
	firstaddr += PMAP_HASH_UPAGES * NBPG;

	/* init proc[0]'s pmap hash table */
	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
		kernel_pmap_store.pm_hash_ptes[i] =
			((u_int)zero_pmap_hash + (i << PGSHIFT)) | PG_V | PG_RO;
		MachTLBWriteIndexed(i + UPAGES,
			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
				(1 << VMMACH_TLB_PID_SHIFT),
			kernel_pmap_store.pm_hash_ptes[i]);
	}

	/*
	 * Allocate memory for pv_table.
	 * This will allocate more entries than we really need.
	 * We should do this in pmap_init when we know the actual
	 * phys_start and phys_end, but it's better to use phys addresses
	 * rather than kernel virtual addresses mapped through the TLB.
	 */
	i = (maxmem - pmax_btop(firstaddr)) * sizeof(struct pv_entry);
	i = pmax_round_page(i);
	pv_table = (pv_entry_t)firstaddr;
	firstaddr += i;

	/*
	 * Clear allocated memory.
	 */
	bzero((caddr_t)start, firstaddr - start);

	avail_start = firstaddr;
	avail_end = pmax_ptob(maxmem);
	mem_size = avail_end - avail_start;

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES * NPTEPG * NBPG;
	/* XXX need to decide how to set cnt.v_page_size */
	pmaxpagesperpage = 1;

	/*
	 * The kernel's pmap is statically allocated so we don't
	 * have to use pmap_create, which is unlikely to work
	 * correctly at this point in the boot sequence.
	 */
	kernel_pmap = cur_pmap = &kernel_pmap_store;
	simple_lock_init(&kernel_pmap->pm_lock);
	kernel_pmap->pm_count = 1;
}
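
/*
 * Sketch, not part of the original source: the software TLB cache
 * lookup pattern used throughout this file.  Each user pmap hashes a
 * PID-tagged virtual address to a {high, low} pair, where "high" holds
 * the tagged VA and "low" the PTE bits.  The function name is
 * hypothetical.
 */
#ifdef notdef
unsigned
pmap_hash_lookup_example(pmap, va)
	pmap_t pmap;
	vm_offset_t va;
{
	register pmap_hash_t hp;

	/* tag the VA with the hardware PID, as pmap_remove() does */
	va |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	hp = &pmap->pm_hash[PMAP_HASH(va)];
	if (hp->high == va)
		return (hp->low);	/* cached PTE for this VA */
	return (0);			/* miss: no cached translation */
}
#endif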

/*
 * Bootstrap memory allocator. This function allows for early dynamic
 * memory allocation until the virtual memory system has been bootstrapped.
 * After that point, either kmem_alloc or malloc should be used. This
 * function works by stealing pages from the (to be) managed page pool,
 * stealing virtual address space, then mapping the pages and zeroing them.
 *
 * It should be used from pmap_bootstrap until vm_page_startup; after that
 * point it cannot be used, and any attempt will generate a panic. Note
 * that this memory will never be freed; in essence it is wired down.
 */
void *
pmap_bootstrap_alloc(size)
	int size;
{
	vm_offset_t val;
	extern boolean_t vm_page_startup_initialized;

	if (vm_page_startup_initialized)
		panic("pmap_bootstrap_alloc: called after startup initialized");

	val = avail_start;
	size = round_page(size);
	avail_start += size;

	blkclr((caddr_t) val, size);
	return ((void *) val);
}
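
/*
 * Usage sketch, not part of the original source: stealing a zeroed,
 * permanently wired table during bootstrap, before vm_page_startup()
 * runs.  The function name, table, and size are hypothetical.
 */
#ifdef notdef
void
bootstrap_alloc_example()
{
	pv_entry_t tab;

	/* returns zeroed memory; it can never be freed */
	tab = (pv_entry_t)pmap_bootstrap_alloc(128 * sizeof(struct pv_entry));
}
#endif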

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	This routine should only be called by vm_page_startup()
 *	with KSEG0 addresses.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t virt;
	vm_offset_t start;
	vm_offset_t end;
	int prot;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif

	return(round_page(end));
}

/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 */
pmap_t
pmap_create(size)
	vm_size_t size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * A software-use map does not need a pmap.
	 */
	if (size)
		return(NULL);

	printf("pmap_create(%x) XXX\n", size); /* XXX */
	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}
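
/*
 * Usage sketch, not part of the original source: a size of zero asks
 * for a real, hardware-usable map; any non-zero size denotes a
 * software-only map, for which this implementation returns NULL.  The
 * function name is hypothetical.
 */
#ifdef notdef
void
pmap_create_example()
{
	register pmap_t pm;

	pm = pmap_create((vm_size_t)0);		/* real map, tlbpid = -1 */
	if (pmap_create((vm_size_t)PAGE_SIZE) == NULL)
		printf("software-only maps need no pmap here\n");
}
#endif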

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	register int i;
	extern struct vmspace vmspace0;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_pinit(%x)\n", pmap);
#endif
	simple_lock_init(&pmap->pm_lock);
	pmap->pm_count = 1;
	pmap->pm_flags = 0;
	pmap->pm_hash = zero_pmap_hash;
	for (i = 0; i < PMAP_HASH_UPAGES; i++)
		pmap->pm_hash_ptes[i] =
			((u_int)zero_pmap_hash + (i << PGSHIFT)) | PG_V | PG_RO;
	if (pmap == &vmspace0.vm_pmap)
		pmap->pm_tlbpid = 1;	/* preallocated in mach_init() */
	else
		pmap->pm_tlbpid = -1;	/* none allocated yet */
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_destroy(%x)\n", pmap);
#endif
	if (pmap == NULL)
		return;

	printf("pmap_destroy(%x) XXX\n", pmap); /* XXX */
	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register pmap_t pmap;
{
	register int id;
#ifdef DIAGNOSTIC
	register int i;
#endif

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_release(%x)\n", pmap);
#endif

	if (pmap->pm_hash && pmap->pm_hash != zero_pmap_hash) {
		kmem_free(kernel_map, (vm_offset_t)pmap->pm_hash,
			PMAP_HASH_SIZE);
		pmap->pm_hash = zero_pmap_hash;
	}
	if ((id = pmap->pm_tlbpid) < 0)
		return;
#ifdef DIAGNOSTIC
	if (!(whichpids[id >> 5] & (1 << (id & 0x1F))))
		panic("pmap_release: id free");
#endif
	MachTLBFlushPID(id);
	whichpids[id >> 5] &= ~(1 << (id & 0x1F));
	pmap->pm_flags &= ~PM_MODIFIED;
	pmap->pm_tlbpid = -1;
	if (pmap == cur_pmap)
		cur_pmap = (pmap_t)0;
#ifdef DIAGNOSTIC
	/* invalidate user PTE cache */
	for (i = 0; i < PMAP_HASH_UPAGES; i++)
		MachTLBWriteIndexed(i + UPAGES, MACH_RESERVED_ADDR, 0);
#endif
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)\n", pmap);
#endif
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	register pmap_t pmap;
	vm_offset_t sva, eva;
{
	register vm_offset_t va;
	pmap_hash_t hp;
	unsigned entry;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
	remove_stats.calls++;
#endif
	if (pmap == NULL)
		return;

	/* anything in the cache? */
	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
		return;

	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/* remove entries from kernel pmap */
		pte = kvtopte(sva);
		for (va = sva; va < eva; va += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, va, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
			pte->pt_entry = PG_NV;
			/*
			 * Flush the TLB for the given address.
			 */
			MachTLBFlushAddr(va);
#ifdef DEBUG
			remove_stats.flushes++;
#endif
		}
		return;
	}

	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	/*
	 * If we are not in the current address space, just flush the
	 * software cache and not the hardware.
	 */
	if (pmap != cur_pmap) {
		for (; va < eva; va += NBPG) {
			hp = &pmap->pm_hash[PMAP_HASH(va)];
			if (hp->high != va)
				continue;

			hp->high = 0;
			entry = hp->low;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
			pmap->pm_flags |= PM_MODIFIED;
#ifdef DEBUG
			remove_stats.removes++;
#endif
		}
		return;
	}

	for (; va < eva; va += NBPG) {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (hp->high != va)
			continue;

		hp->high = 0;
		entry = hp->low;
		if (entry & PG_WIRED)
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;
		pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
#ifdef ATTR
		pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
		/*
		 * Flush the TLB for the given address.
		 */
		MachTLBFlushAddr(va);
#ifdef DEBUG
		remove_stats.flushes++;
#endif
	}
}

/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(pa, prot)
	vm_offset_t pa;
	vm_prot_t prot;
{
	register pv_entry_t pv;
	register vm_offset_t va;
	int s;

#ifdef DEBUG
	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
		printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
	if (!IS_VM_PHYSADDR(pa))
		return;

	switch (prot) {
	case VM_PROT_ALL:
		break;

	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pv = pa_to_pvh(pa);
		s = splimp();
		/*
		 * Loop over all current mappings setting/clearing as appropriate.
		 */
		if (pv->pv_pmap != NULL) {
			for (; pv; pv = pv->pv_next) {
				extern vm_offset_t pager_sva, pager_eva;
				va = pv->pv_va;

				/*
				 * XXX don't write protect pager mappings
				 */
				if (va >= pager_sva && va < pager_eva)
					continue;
				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
					prot);
			}
		}
		splx(s);
		break;

	/* remove_all */
	default:
		pv = pa_to_pvh(pa);
		s = splimp();
		while (pv->pv_pmap != NULL) {
			pmap_remove(pv->pv_pmap, pv->pv_va,
				    pv->pv_va + PAGE_SIZE);
		}
		splx(s);
	}
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register vm_offset_t va;
	pmap_hash_t hp;
	u_int p;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

	/* anything in the software cache? */
	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
		return;

	if (!(prot & VM_PROT_READ)) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/*
		 * Change entries in kernel pmap.
		 * This will trap if the page is writeable (in order to set
		 * the dirty bit) even if the dirty bit is already set. The
		 * optimization isn't worth the effort since this code isn't
		 * executed much. The common case is to make a user page
		 * read-only.
		 */
		p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
		pte = kvtopte(sva);
		for (va = sva; va < eva; va += NBPG, pte++) {
			if (!(pte->pt_entry & PG_V))
				continue;
			pte->pt_entry = (pte->pt_entry & ~(PG_M | PG_RO)) | p;
			/*
			 * Update the TLB if the given address is in the cache.
			 */
			MachTLBUpdate(va, pte->pt_entry);
		}
		return;
	}

	p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	/*
	 * If we are not in the current address space, just flush the
	 * software cache and not the hardware.
	 */
	if (pmap != cur_pmap) {
		for (; va < eva; va += NBPG) {
			hp = &pmap->pm_hash[PMAP_HASH(va)];
			if (hp->high != va)
				continue;

			hp->low = (hp->low & ~(PG_M | PG_RO)) | p;
			pmap->pm_flags |= PM_MODIFIED;
		}
		return;
	}

	for (; va < eva; va += NBPG) {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (hp->high != va)
			continue;

		hp->low = (hp->low & ~(PG_M | PG_RO)) | p;
		/*
		 * Update the TLB if the given address is in the cache.
		 */
		MachTLBUpdate(hp->high, hp->low);
	}
}

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pmap_hash_t hp;
	register u_int npte;
	register int i;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
		       pmap, va, pa, prot, wired);
#endif
#ifdef DIAGNOSTIC
	if (!pmap)
		panic("pmap_enter: pmap");
	if (pmap->pm_tlbpid < 0)
		panic("pmap_enter: tlbpid");
	if (pmap == kernel_pmap) {
#ifdef DEBUG
		enter_stats.kernel++;	/* enter_stats exists only if DEBUG */
#endif
		if ((va & 0xE0000000) != 0xC0000000)
			panic("pmap_enter: kva");
	} else {
#ifdef DEBUG
		enter_stats.user++;
#endif
		if (va & 0x80000000)
			panic("pmap_enter: uva");
	}
	if (!(prot & VM_PROT_READ))
		panic("pmap_enter: prot");
#endif

	/*
	 * See if we need to create a new TLB cache.
	 */
	if (pmap->pm_hash == zero_pmap_hash) {
		register vm_offset_t kva;
		register pt_entry_t *pte;

		kva = kmem_alloc(kernel_map, PMAP_HASH_SIZE);
		pmap->pm_hash = (pmap_hash_t)kva;

		/*
		 * Convert the kernel virtual address to a physical one
		 * and cache it in the pmap. Note: if the physical address
		 * can change (due to memory compaction in kmem_alloc?),
		 * we will have to update things.
		 */
		pte = kvtopte(kva);
		for (i = 0; i < PMAP_HASH_UPAGES; i++) {
			pmap->pm_hash_ptes[i] = pte->pt_entry & ~PG_G;
			pte++;
		}

		/*
		 * Map in new TLB cache if it is current.
		 */
		if (pmap == cur_pmap) {
#ifdef DIAGNOSTIC
			if (pmap->pm_tlbpid < 0)
				panic("pmap_enter: tlbpid");
#endif
			for (i = 0; i < PMAP_HASH_UPAGES; i++) {
				MachTLBWriteIndexed(i + UPAGES,
					(PMAP_HASH_UADDR + (i << PGSHIFT)) |
						(pmap->pm_tlbpid <<
						VMMACH_TLB_PID_SHIFT),
					pmap->pm_hash_ptes[i]);
			}
		}
#ifdef DIAGNOSTIC
		for (i = 0; i < PAGE_SIZE; i += sizeof(int), kva += sizeof(int))
			if (*(int *)kva != 0)
				panic("pmap_enter: *kva != 0");
#endif
	}
	if (IS_VM_PHYSADDR(pa)) {
		register pv_entry_t pv, npv;
		int s;

		if (!(prot & VM_PROT_WRITE))
			npte = PG_RO;
		else {
			register vm_page_t mem;

			mem = PHYS_TO_VM_PAGE(pa);
			if ((int)va < 0) {
				/*
				 * Don't bother to trap on kernel writes,
				 * just record page as dirty.
				 */
				npte = PG_M;
				mem->clean = FALSE;
			} else
#ifdef ATTR
				if ((pmap_attributes[atop(pa - KERNBASE)] &
				    PMAP_ATTR_MOD) || !mem->clean)
#else
				if (!mem->clean)
#endif
					npte = PG_M;
				else
					npte = 0;
		}

#ifdef DEBUG
		enter_stats.managed++;
#endif
		/*
		 * Enter the pmap and virtual address into the
		 * physical to virtual map table.
		 */
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("pmap_enter: pv %x: was %x/%x/%x\n",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		if (pv->pv_pmap == NULL) {
			/*
			 * No entries yet, use header as the first entry
			 */
#ifdef DEBUG
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
			pv->pv_flags = 0;
		} else {
			/*
			 * There is at least one other VA mapping this page.
			 * Place this entry after the header.
			 *
			 * Note: the entry may already be in the table if
			 * we are only changing the protection bits.
			 */
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va) {
#ifdef DIAGNOSTIC
				    if (!pmap->pm_hash) {
					unsigned entry;

					entry = kvtopte(va)->pt_entry;
					if (!(entry & PG_V) ||
					    (entry & PG_FRAME) != pa)
			printf("found kva %x pa %x in pv_table but != %x\n",
				va, pa, entry);
				    } else {
					hp = &pmap->pm_hash[PMAP_HASH(va)];
					if (hp->high != (va |
					    (pmap->pm_tlbpid <<
					    VMMACH_TLB_PID_SHIFT)) ||
					    (hp->low & PG_FRAME) != pa)
			printf("found va %x pa %x in pv_table but != %x %x\n",
				va, pa, hp->high, hp->low);
				    }
#endif
					goto fnd;
				}
			/* can this cause us to recurse forever? */
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
		fnd:
			;
		}
		splx(s);
	} else {
		/*
		 * Assumption: if it is not part of our managed memory
		 * then it must be device memory, which may be volatile.
		 */
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
		printf("pmap_enter: UNMANAGED ADDRESS va %x pa %x\n",
			va, pa); /* XXX */
		npte = (prot & VM_PROT_WRITE) ? PG_M : PG_RO;
	}

	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/* enter entries into kernel pmap */
		pte = kvtopte(va);
		npte |= pa | PG_V | PG_G;
		if (wired) {
			pmap->pm_stats.wired_count += pmaxpagesperpage;
			npte |= PG_WIRED;
		}
		i = pmaxpagesperpage;
		do {
			if (!(pte->pt_entry & PG_V)) {
				pmap->pm_stats.resident_count++;
				MachTLBWriteRandom(va, npte);
			} else {
				/*
				 * Update the same virtual address entry.
				 */
				MachTLBUpdate(va, npte);
			}
			pte->pt_entry = npte;
			va += NBPG;
			npte += NBPG;
			pte++;
		} while (--i != 0);
		return;
	}

	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * PMAX pages in a MACH page.
	 */
	npte |= pa | PG_V;
	if (wired) {
		pmap->pm_stats.wired_count += pmaxpagesperpage;
		npte |= PG_WIRED;
	}
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("pmap_enter: new pte value %x\n", npte);
#endif
	va |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	i = pmaxpagesperpage;
	do {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (!hp->high) {
			pmap->pm_stats.resident_count++;
			hp->high = va;
			hp->low = npte;
			MachTLBWriteRandom(va, npte);
		} else {
#ifdef DEBUG
			enter_stats.cachehit++;
#endif
			if (hp->high == va) {
				/*
				 * Update the same entry.
				 */
				hp->low = npte;
				MachTLBUpdate(va, npte);
			} else if (!(hp->low & PG_WIRED)) {
				MachTLBFlushAddr(hp->high);
				pmap_remove_pv(pmap, hp->high & PG_FRAME,
					hp->low & PG_FRAME);
				hp->high = va;
				hp->low = npte;
				MachTLBWriteRandom(va, npte);
			} else {
				/*
				 * Don't replace wired entries, just update
				 * the hardware TLB.
				 * Bug: routines to flush the TLB won't know
				 * that the entry is in the hardware.
				 */
				printf("pmap_enter: wired va %x %x\n", va,
					hp->low); /* XXX */
				panic("pmap_enter: wired"); /* XXX */
				MachTLBWriteRandom(va, npte);
			}
		}
		va += NBPG;
		npte += NBPG;
	} while (--i != 0);
}
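
/*
 * Illustrative sketch, not part of the original source: a typical call
 * entering a wired, writable kernel mapping so the page can never
 * fault.  The function and parameter names are hypothetical.
 */
#ifdef notdef
void
pmap_enter_example(example_va, example_pa)
	vm_offset_t example_va, example_pa;
{
	pmap_enter(kernel_pmap, example_va, example_pa,
		VM_PROT_READ | VM_PROT_WRITE, TRUE);
}
#endif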

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t	pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pmap_hash_t hp;
	u_int p;
	int i;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	p = wired ? PG_WIRED : 0;

	/*
	 * Don't need to flush the TLB since PG_WIRED is only in software.
	 */
	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/* change entries in kernel pmap */
		pte = kvtopte(va);
		i = pmaxpagesperpage;
		if (!(pte->pt_entry & PG_WIRED) && p)
			pmap->pm_stats.wired_count += i;
		else if ((pte->pt_entry & PG_WIRED) && !p)
			pmap->pm_stats.wired_count -= i;
		do {
			/*
			 * Note: a 'continue' here would skip the pte
			 * increment below, so guard with 'if' instead.
			 */
			if (pte->pt_entry & PG_V)
				pte->pt_entry = (pte->pt_entry & ~PG_WIRED) | p;
			pte++;
		} while (--i != 0);
	} else if (pmap->pm_tlbpid >= 0 && pmap->pm_hash != zero_pmap_hash) {
		i = pmaxpagesperpage;
		do {
			hp = &pmap->pm_hash[PMAP_HASH(va)];
			if (hp->high) {
				if (!(hp->low & PG_WIRED) && p)
					pmap->pm_stats.wired_count++;
				else if ((hp->low & PG_WIRED) && !p)
					pmap->pm_stats.wired_count--;
				hp->low = (hp->low & ~PG_WIRED) | p;
			}
			va += NBPG;
		} while (--i != 0);
	}
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t	pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;
	register pmap_hash_t hp;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif

	if (!pmap->pm_hash)
		pa = kvtopte(va)->pt_entry & PG_FRAME;
	else if (pmap->pm_tlbpid >= 0) {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (hp->high)
			pa = hp->low & PG_FRAME;
		else
			pa = 0;
	} else
		pa = 0;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("%x\n", pa);
#endif
	return(pa);
}
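
/*
 * Illustrative sketch, not part of the original source: recovering the
 * physical page behind a mapped virtual address.  A return value of 0
 * means no translation was cached for that address.  The function name
 * is hypothetical.
 */
#ifdef notdef
vm_offset_t
pmap_extract_example(va)
	vm_offset_t va;
{
	register vm_offset_t pa;

	pa = pmap_extract(kernel_pmap, va);
	if (pa == 0)
		printf("example: va %x has no mapping\n", va);
	return (pa);
}
#endif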

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap;
	pmap_t src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 *	Require that all active physical maps contain no
 *	incorrect entries NOW.  [This update includes
 *	forcing updates of any address map caching.]
 *
 *	Generally used to ensure that a thread about
 *	to run will see a semantically correct world.
 */
void pmap_update()
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()\n");
#endif
}

/*
 *	Routine:	pmap_collect
 *	Function:
 *		Garbage collects the physical map system for
 *		pages which are no longer used.
 *		Success need not be guaranteed -- that is, there
 *		may well be pages which are not referenced, but
 *		others may be collected.
 *	Usage:
 *		Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_collect(%x)\n", pmap);
#endif
}

/*
 *	pmap_zero_page zeros the specified (machine independent)
 *	page.
 */
void
pmap_zero_page(phys)
	register vm_offset_t phys;
{
	register vm_offset_t end;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)\n", phys);
#endif
	end = phys + PAGE_SIZE;
	do {
		((unsigned *)phys)[0] = 0;
		((unsigned *)phys)[1] = 0;
		((unsigned *)phys)[2] = 0;
		((unsigned *)phys)[3] = 0;
		phys += 4 * sizeof(unsigned);
	} while (phys != end);
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page.
 */
void
pmap_copy_page(src, dst)
	register vm_offset_t src, dst;
{
	register vm_offset_t end;
	register unsigned tmp0, tmp1, tmp2, tmp3;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)\n", src, dst);
#endif
	end = src + PAGE_SIZE;
	do {
		tmp0 = ((unsigned *)src)[0];
		tmp1 = ((unsigned *)src)[1];
		tmp2 = ((unsigned *)src)[2];
		tmp3 = ((unsigned *)src)[3];
		((unsigned *)dst)[0] = tmp0;
		((unsigned *)dst)[1] = tmp1;
		((unsigned *)dst)[2] = tmp2;
		((unsigned *)dst)[3] = tmp3;
		src += 4 * sizeof(unsigned);
		dst += 4 * sizeof(unsigned);
	} while (src != end);
}

/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t		pmap;
	vm_offset_t	sva, eva;
	boolean_t	pageable;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)\n",
		       pmap, sva, eva, pageable);
#endif
}

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_MOD;
#endif
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_REF;
#endif
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return(pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_REF);
#else
	return(FALSE);
#endif
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return(pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_MOD);
#else
	return(FALSE);
#endif
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_phys_address(%x)\n", ppn);
#endif
	panic("pmap_phys_address"); /* XXX */
	return(pmax_ptob(ppn));
}

/*
 * Miscellaneous support routines
 */

/*
 * Allocate a hardware PID and return it.
 * Also, change the hardwired TLB entry to point to the current TLB cache.
 * This is called by swtch().
 */
int
pmap_alloc_tlbpid(p)
	register struct proc *p;
{
	register pmap_t pmap;
	register u_int i;
	register int id;

	pmap = &p->p_vmspace->vm_pmap;
	if ((id = pmap->pm_tlbpid) >= 0) {
		if (pmap->pm_flags & PM_MODIFIED) {
			pmap->pm_flags &= ~PM_MODIFIED;
			MachTLBFlushPID(id);
		}
		goto done;
	}

	if ((i = whichpids[0]) != 0xFFFFFFFF)
		id = 0;
	else if ((i = whichpids[1]) != 0xFFFFFFFF)
		id = 32;
	else {
		register struct proc *q;
		register pmap_t q_pmap;

		/*
		 * Have to find a tlbpid to recycle.
		 * There is probably a better way to do this.
		 */
		for (q = allproc; q != NULL; q = q->p_nxt) {
			q_pmap = &q->p_vmspace->vm_pmap;
			if ((id = q_pmap->pm_tlbpid) < 0)
				continue;
			if (q->p_stat != SRUN)
				goto fnd;
		}
		/* loop fell through: no process to steal from */
		if (q == NULL)
			panic("TLBPidAlloc");
	fnd:
		printf("pmap_alloc_tlbpid: recycle pid %d (%s) tlbpid %d\n",
			q->p_pid, q->p_comm, id); /* XXX */
		/*
		 * Even though the virtual to physical mapping hasn't changed,
		 * we need to clear the PID tag in the high entry of the cache.
		 */
		if (q_pmap->pm_hash != zero_pmap_hash) {
			register pmap_hash_t hp;

			hp = q_pmap->pm_hash;
			for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
				if (!hp->high)
					continue;

				if (hp->low & PG_WIRED) {
					printf("Clearing wired user entry! h %x l %x\n", hp->high, hp->low);
					panic("pmap_alloc_tlbpid: wired");
				}
				pmap_remove_pv(q_pmap, hp->high & PG_FRAME,
					hp->low & PG_FRAME);
				hp->high = 0;
				q_pmap->pm_stats.resident_count--;
			}
		}
		q_pmap->pm_tlbpid = -1;
		MachTLBFlushPID(id);
#ifdef DEBUG
		remove_stats.pidflushes++;
#endif
		pmap->pm_tlbpid = id;
		goto done;
	}
	while (i & 1) {
		i >>= 1;
		id++;
	}
	whichpids[id >> 5] |= 1 << (id & 0x1F);
	pmap->pm_tlbpid = id;
done:
	/*
	 * Map in new TLB cache.
	 */
	if (pmap == cur_pmap)
		return (id);
	cur_pmap = pmap;
	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
		MachTLBWriteIndexed(i + UPAGES,
			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
				(id << VMMACH_TLB_PID_SHIFT),
			pmap->pm_hash_ptes[i]);
	}
	return (id);
}
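
/*
 * Illustrative sketch, not part of the original source: how a context
 * switch path might use pmap_alloc_tlbpid() to obtain the hardware PID
 * for the incoming process.  MachSetPID is hypothetical here; the real
 * entry point that loads the PID into the TLB context lives in locore.
 */
#ifdef notdef
void
pmap_switch_example(p)
	register struct proc *p;
{
	register int hwpid;

	hwpid = pmap_alloc_tlbpid(p);	/* also maps in the TLB cache */
	MachSetPID(hwpid);		/* load PID for address translation */
}
#endif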

/*
 * Remove a physical to virtual address translation.
 */
void
pmap_remove_pv(pmap, va, pa)
	pmap_t pmap;
	vm_offset_t va, pa;
{
	register pv_entry_t pv, npv;
	int s;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
#endif
	/*
	 * Remove page from the PV table (raise IPL since we
	 * may be called at interrupt time).
	 */
	if (!IS_VM_PHYSADDR(pa))
		return;
	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			free((caddr_t)npv, M_VMPVENT);
		} else
			pv->pv_pmap = NULL;
#ifdef DEBUG
		remove_stats.pvfirst++;
#endif
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
#ifdef DEBUG
			remove_stats.pvsearch++;
#endif
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				goto fnd;
		}
#ifdef DIAGNOSTIC
		printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa);
		panic("pmap_remove_pv");
#endif
		/* entry not found; avoid dereferencing a null npv below */
		splx(s);
		return;
	fnd:
		pv->pv_next = npv->pv_next;
		free((caddr_t)npv, M_VMPVENT);
	}
	splx(s);
}

#ifdef DEBUG
pmap_print(pmap)
	pmap_t pmap;
{
	register pmap_hash_t hp;
	register int i;

	printf("\tpmap_print(%x)\n", pmap);

	if (pmap->pm_hash == zero_pmap_hash) {
		printf("pm_hash == zero\n");
		return;
	}
	if (pmap->pm_hash == (pmap_hash_t)0) {
		printf("pm_hash == kernel\n");
		return;
	}
	hp = pmap->pm_hash;
	for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
		if (!hp->high)
			continue;
		printf("%d: hi %x low %x\n", i, hp->high, hp->low);
	}
}
#endif