152118Smckusick /*
252118Smckusick  * Copyright (c) 1992 The Regents of the University of California.
352118Smckusick  * All rights reserved.
452118Smckusick  *
552118Smckusick  * This code is derived from software contributed to Berkeley by
652118Smckusick  * the Systems Programming Group of the University of Utah Computer
752118Smckusick  * Science Department and Ralph Campbell.
852118Smckusick  *
952118Smckusick  * %sccs.include.redist.c%
1052118Smckusick  *
11*56633Sralph  *	@(#)pmap.c	7.12 (Berkeley) 10/24/92
1252118Smckusick  */
1352118Smckusick 
1452118Smckusick /*
1552118Smckusick  *	Manages physical address maps.
1652118Smckusick  *
1752118Smckusick  *	In addition to hardware address maps, this
1852118Smckusick  *	module is called upon to provide software-use-only
1952118Smckusick  *	maps which may or may not be stored in the same
2052118Smckusick  *	form as hardware maps.  These pseudo-maps are
2152118Smckusick  *	used to store intermediate results from copy
2252118Smckusick  *	operations to and from address spaces.
2352118Smckusick  *
2452118Smckusick  *	Since the information managed by this module is
2552118Smckusick  *	also stored by the logical address mapping module,
2652118Smckusick  *	this module may throw away valid virtual-to-physical
2752118Smckusick  *	mappings at almost any time.  However, invalidations
2852118Smckusick  *	of virtual-to-physical mappings must be done as
2952118Smckusick  *	requested.
3052118Smckusick  *
3152118Smckusick  *	In order to cope with hardware architectures which
3252118Smckusick  *	make virtual-to-physical map invalidates expensive,
3352118Smckusick  * this module may delay invalidate or reduce protection
3452118Smckusick  *	operations until such time as they are actually
3552118Smckusick  *	necessary.  This module is given full information as
3652118Smckusick  *	to which processors are currently using which maps,
3752118Smckusick  * and when physical maps must be made correct.
3852118Smckusick  */
3952118Smckusick 
4056524Sbostic #include <sys/param.h>
4156524Sbostic #include <sys/proc.h>
4256524Sbostic #include <sys/malloc.h>
4356524Sbostic #include <sys/user.h>
4452118Smckusick 
4556524Sbostic #include <vm/vm.h>
4656524Sbostic #include <vm/vm_kern.h>
4756524Sbostic #include <vm/vm_page.h>
4852118Smckusick 
4956524Sbostic #include <machine/machConst.h>
5056524Sbostic #include <machine/pte.h>
5152118Smckusick 
5252118Smckusick /*
5352118Smckusick  * For each vm_page_t, there is a list of all currently valid virtual
5452118Smckusick  * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
5552118Smckusick  * XXX really should do this as a part of the higher level code.
5652118Smckusick  */
5752118Smckusick typedef struct pv_entry {
5852118Smckusick 	struct pv_entry	*pv_next;	/* next pv_entry */
5952118Smckusick 	struct pmap	*pv_pmap;	/* pmap where mapping lies */
6052118Smckusick 	vm_offset_t	pv_va;		/* virtual address for mapping */
6152118Smckusick } *pv_entry_t;
6252118Smckusick 
6352118Smckusick pv_entry_t	pv_table;	/* array of entries, one per page */
6452118Smckusick extern void	pmap_remove_pv();
6552118Smckusick 
6652118Smckusick #define pa_index(pa)		atop((pa) - first_phys_addr)
6752118Smckusick #define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
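/*
 * For example, for a managed physical address pa, pa_to_pvh(pa) yields
 * the head of its mapping list; additional mappings hang off pv_next,
 * and a null pv_pmap in the head means the page is currently unmapped.
 */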
6852118Smckusick 
6952118Smckusick #ifdef DEBUG
7052118Smckusick struct {
7152118Smckusick 	int kernel;	/* entering kernel mapping */
7252118Smckusick 	int user;	/* entering user mapping */
7352118Smckusick 	int ptpneeded;	/* needed to allocate a PT page */
7452118Smckusick 	int pwchange;	/* no mapping change, just wiring or protection */
7552118Smckusick 	int wchange;	/* no mapping change, just wiring */
7652118Smckusick 	int mchange;	/* was mapped but mapping to different page */
7752118Smckusick 	int managed;	/* a managed page */
7852118Smckusick 	int firstpv;	/* first mapping for this PA */
7952118Smckusick 	int secondpv;	/* second mapping for this PA */
8052118Smckusick 	int ci;		/* cache inhibited */
8152118Smckusick 	int unmanaged;	/* not a managed page */
8252118Smckusick 	int flushes;	/* cache flushes */
8352118Smckusick 	int cachehit;	/* new entry forced valid entry out */
8452118Smckusick } enter_stats;
8552118Smckusick struct {
8652118Smckusick 	int calls;
8752118Smckusick 	int removes;
8852118Smckusick 	int flushes;
8952118Smckusick 	int pidflushes;	/* HW pid stolen */
9052118Smckusick 	int pvfirst;
9152118Smckusick 	int pvsearch;
9252118Smckusick } remove_stats;
9352118Smckusick 
9452118Smckusick int pmapdebug;
9552118Smckusick #define PDB_FOLLOW	0x0001
9652118Smckusick #define PDB_INIT	0x0002
9752118Smckusick #define PDB_ENTER	0x0004
9852118Smckusick #define PDB_REMOVE	0x0008
9952118Smckusick #define PDB_CREATE	0x0010
10052118Smckusick #define PDB_PTPAGE	0x0020
10152118Smckusick #define PDB_CACHE	0x0040
10252118Smckusick #define PDB_BITS	0x0080
10352118Smckusick #define PDB_COLLECT	0x0100
10452118Smckusick #define PDB_PROTECT	0x0200
10552118Smckusick #define PDB_TLBPID	0x0400
10652118Smckusick #define PDB_PARANOIA	0x2000
10752118Smckusick #define PDB_WIRING	0x4000
10852118Smckusick #define PDB_PVDUMP	0x8000
10952118Smckusick 
11052118Smckusick #endif /* DEBUG */
11152118Smckusick 
11252118Smckusick u_int	whichpids[2] = {	/* bit mask of hardware PID's in use */
11352118Smckusick 	3, 0
11452118Smckusick };
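/*
 * The initial mask of 3 marks PIDs 0 and 1 as taken: PID 1 belongs to
 * proc0's pmap (preallocated in mach_init(), see pmap_pinit()), and
 * PID 0 is presumably reserved for the kernel itself.
 */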
11552118Smckusick 
11652118Smckusick struct pmap	kernel_pmap_store;
11752118Smckusick pmap_t		cur_pmap;	/* current pmap mapped in hardware */
11852118Smckusick 
11952118Smckusick vm_offset_t    	avail_start;	/* PA of first available physical page */
12052118Smckusick vm_offset_t	avail_end;	/* PA of last available physical page */
12152118Smckusick vm_size_t	mem_size;	/* memory size in bytes */
12252118Smckusick vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
12352118Smckusick vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
12452118Smckusick int		pmaxpagesperpage;	/* PAGE_SIZE / NBPG */
12552118Smckusick #ifdef ATTR
12652118Smckusick char		*pmap_attributes;	/* reference and modify bits */
12752118Smckusick #endif
12852118Smckusick pmap_hash_t	zero_pmap_hash;		/* empty TLB hash table for init */
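/*
 * Every user pmap initially shares zero_pmap_hash (see pmap_pinit());
 * since that table is all zeroes, every lookup misses until pmap_enter()
 * replaces it with a private table allocated from kernel_map.
 */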
12952118Smckusick 
13052118Smckusick /*
13152118Smckusick  *	Bootstrap the system enough to run with virtual memory.
13252118Smckusick  */
13352118Smckusick void
13452118Smckusick pmap_bootstrap(firstaddr)
13552118Smckusick 	vm_offset_t firstaddr;
13652118Smckusick {
13752118Smckusick 	register int i;
13852118Smckusick 	vm_offset_t start = firstaddr;
13952118Smckusick 	extern int maxmem, physmem;
14052118Smckusick 
14152118Smckusick 	/*
14252118Smckusick 	 * Allocate a TLB hash table for the kernel.
14352118Smckusick 	 * This could be a KSEG0 address and thus save TLB entries, but
14452118Smckusick 	 * it is faster and simpler in assembly language to have a
14552118Smckusick 	 * fixed address that can be accessed with a 16-bit signed offset.
14652118Smckusick 	 * Note: the kernel pm_hash field is null; user pm_hash fields are
14752118Smckusick 	 * either the table or zero_pmap_hash.
14852118Smckusick 	 */
14952118Smckusick 	kernel_pmap_store.pm_hash = (pmap_hash_t)0;
15052118Smckusick 	for (i = 0; i < PMAP_HASH_KPAGES; i++) {
15152118Smckusick 		MachTLBWriteIndexed(i + UPAGES + PMAP_HASH_UPAGES,
15252118Smckusick 			PMAP_HASH_KADDR + (i << PGSHIFT),
15352118Smckusick 			firstaddr | PG_V | PG_M | PG_G);
15452118Smckusick 		firstaddr += NBPG;
15552118Smckusick 	}
15652118Smckusick 
15752118Smckusick 	/*
15852118Smckusick 	 * Allocate an empty TLB hash table for initial pmap's.
15952118Smckusick 	 */
16052741Sralph 	zero_pmap_hash = (pmap_hash_t)MACH_PHYS_TO_CACHED(firstaddr);
16152118Smckusick 
16252118Smckusick 	/* init proc[0]'s pmap hash table */
16352118Smckusick 	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
16452937Sralph 		kernel_pmap_store.pm_hash_ptes[i] = firstaddr | PG_V | PG_RO;
16552118Smckusick 		MachTLBWriteIndexed(i + UPAGES,
16652118Smckusick 			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
16752118Smckusick 				(1 << VMMACH_TLB_PID_SHIFT),
16852118Smckusick 			kernel_pmap_store.pm_hash_ptes[i]);
16952937Sralph 		firstaddr += NBPG;
17052118Smckusick 	}
17152118Smckusick 
17252118Smckusick 	/*
17352118Smckusick 	 * Allocate memory for pv_table.
17452118Smckusick 	 * This will allocate more entries than we really need.
17552118Smckusick 	 * We should do this in pmap_init when we know the actual
17652118Smckusick 	 * phys_start and phys_end, but it is better to use phys addresses
17752118Smckusick 	 * rather than kernel virtual addresses mapped through the TLB.
17852118Smckusick 	 */
17952118Smckusick 	i = (maxmem - pmax_btop(firstaddr)) * sizeof(struct pv_entry);
18052118Smckusick 	i = pmax_round_page(i);
18152741Sralph 	pv_table = (pv_entry_t)MACH_PHYS_TO_CACHED(firstaddr);
18252118Smckusick 	firstaddr += i;
18352118Smckusick 
18452118Smckusick 	/*
18552118Smckusick 	 * Clear allocated memory.
18652118Smckusick 	 */
18752741Sralph 	bzero((caddr_t)MACH_PHYS_TO_CACHED(start), firstaddr - start);
18852118Smckusick 
18952118Smckusick 	avail_start = firstaddr;
19052118Smckusick 	avail_end = pmax_ptob(maxmem);
19152118Smckusick 	mem_size = avail_end - avail_start;
19252118Smckusick 
19352118Smckusick 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
19452118Smckusick 	virtual_end = VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES * NPTEPG * NBPG;
19552118Smckusick 	/* XXX need to decide how to set cnt.v_page_size */
19652118Smckusick 	pmaxpagesperpage = 1;
19752118Smckusick 
19852606Smckusick 	cur_pmap = &kernel_pmap_store;
19952937Sralph 	simple_lock_init(&kernel_pmap_store.pm_lock);
20052937Sralph 	kernel_pmap_store.pm_count = 1;
20152118Smckusick }
20252118Smckusick 
20352118Smckusick /*
20452118Smckusick  * Bootstrap memory allocator. This function allows for early dynamic
20552118Smckusick  * memory allocation until the virtual memory system has been bootstrapped.
20652118Smckusick  * After that point, either kmem_alloc or malloc should be used. This
20752118Smckusick  * function works by stealing pages from the (to be) managed page pool,
20852118Smckusick  * stealing virtual address space, then mapping the pages and zeroing them.
20952118Smckusick  *
21052118Smckusick  * It should be used from pmap_bootstrap until vm_page_startup; after that
21152118Smckusick  * it cannot be used and will generate a panic if tried. Note that this
21252118Smckusick  * memory will never be freed, and in essence it is wired down.
21352118Smckusick  */
21452118Smckusick void *
21552118Smckusick pmap_bootstrap_alloc(size)
21652118Smckusick 	int size;
21752118Smckusick {
21852118Smckusick 	vm_offset_t val;
21952118Smckusick 	extern boolean_t vm_page_startup_initialized;
22052118Smckusick 
22152118Smckusick 	if (vm_page_startup_initialized)
22252118Smckusick 		panic("pmap_bootstrap_alloc: called after startup initialized");
22352118Smckusick 
22452741Sralph 	val = MACH_PHYS_TO_CACHED(avail_start);
22552118Smckusick 	size = round_page(size);
22652118Smckusick 	avail_start += size;
22752118Smckusick 
22852741Sralph 	blkclr((caddr_t)val, size);
22952741Sralph 	return ((void *)val);
23052118Smckusick }
23152118Smckusick 
23252118Smckusick /*
23352118Smckusick  *	Initialize the pmap module.
23452118Smckusick  *	Called by vm_init, to initialize any structures that the pmap
23552118Smckusick  *	system needs to map virtual memory.
23652118Smckusick  */
23752118Smckusick void
23852118Smckusick pmap_init(phys_start, phys_end)
23952118Smckusick 	vm_offset_t phys_start, phys_end;
24052118Smckusick {
24152118Smckusick 
24252118Smckusick #ifdef DEBUG
24352118Smckusick 	if (pmapdebug & PDB_FOLLOW)
24452118Smckusick 		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
24552118Smckusick #endif
24652118Smckusick }
24752118Smckusick 
24852118Smckusick /*
24952118Smckusick  *	Used to map a range of physical addresses into kernel
25052118Smckusick  *	virtual address space.
25152118Smckusick  *
25252118Smckusick  *	This routine should only be called by vm_page_startup()
25352118Smckusick  *	with KSEG0 addresses.
25452118Smckusick  */
25552118Smckusick vm_offset_t
25652118Smckusick pmap_map(virt, start, end, prot)
25752118Smckusick 	vm_offset_t virt;
25852118Smckusick 	vm_offset_t start;
25952118Smckusick 	vm_offset_t end;
26052118Smckusick 	int prot;
26152118Smckusick {
26252118Smckusick 
26352118Smckusick #ifdef DEBUG
26452118Smckusick 	if (pmapdebug & PDB_FOLLOW)
26552118Smckusick 		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
26652118Smckusick #endif
26752118Smckusick 
26852741Sralph 	return (round_page(end));
26952118Smckusick }
27052118Smckusick 
27152118Smckusick /*
27252118Smckusick  *	Create and return a physical map.
27352118Smckusick  *
27452118Smckusick  *	If the size specified for the map
27552118Smckusick  *	is zero, the map is an actual physical
27652118Smckusick  *	map, and may be referenced by the
27752118Smckusick  *	hardware.
27852118Smckusick  *
27952118Smckusick  *	If the size specified is non-zero,
28052118Smckusick  *	the map will be used in software only, and
28152118Smckusick  *	is bounded by that size.
28252118Smckusick  */
28352118Smckusick pmap_t
28452118Smckusick pmap_create(size)
28552118Smckusick 	vm_size_t size;
28652118Smckusick {
28752118Smckusick 	register pmap_t pmap;
28852118Smckusick 
28952118Smckusick #ifdef DEBUG
29052118Smckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
29152118Smckusick 		printf("pmap_create(%x)\n", size);
29252118Smckusick #endif
29352118Smckusick 	/*
29452118Smckusick 	 * Software use map does not need a pmap
29552118Smckusick 	 */
29652118Smckusick 	if (size)
29752741Sralph 		return (NULL);
29852118Smckusick 
29952118Smckusick 	printf("pmap_create(%x) XXX\n", size); /* XXX */
30052118Smckusick 	/* XXX: is it ok to wait here? */
30152118Smckusick 	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
30252118Smckusick #ifdef notifwewait
30352118Smckusick 	if (pmap == NULL)
30452118Smckusick 		panic("pmap_create: cannot allocate a pmap");
30552118Smckusick #endif
30652118Smckusick 	bzero(pmap, sizeof(*pmap));
30752118Smckusick 	pmap_pinit(pmap);
30852118Smckusick 	return (pmap);
30952118Smckusick }
31052118Smckusick 
31152118Smckusick /*
31252118Smckusick  * Initialize a preallocated and zeroed pmap structure,
31352118Smckusick  * such as one in a vmspace structure.
31452118Smckusick  */
31552118Smckusick void
31652118Smckusick pmap_pinit(pmap)
31752118Smckusick 	register struct pmap *pmap;
31852118Smckusick {
31952118Smckusick 	register int i;
32052118Smckusick 	extern struct vmspace vmspace0;
32152118Smckusick 
32252118Smckusick #ifdef DEBUG
32352118Smckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
32452118Smckusick 		printf("pmap_pinit(%x)\n", pmap);
32552118Smckusick #endif
32652118Smckusick 	simple_lock_init(&pmap->pm_lock);
32752118Smckusick 	pmap->pm_count = 1;
32852118Smckusick 	pmap->pm_flags = 0;
32952118Smckusick 	pmap->pm_hash = zero_pmap_hash;
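	/*
	 * Cache PTEs (using physical addresses) that map the shared,
	 * empty hash table at PMAP_HASH_UADDR.  The entries are read-only,
	 * presumably so that a stray store through a not-yet-private
	 * table faults instead of corrupting the table shared by all
	 * newly created pmaps.
	 */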
33052118Smckusick 	for (i = 0; i < PMAP_HASH_UPAGES; i++)
33152118Smckusick 		pmap->pm_hash_ptes[i] =
33252937Sralph 			(MACH_CACHED_TO_PHYS(zero_pmap_hash) + (i << PGSHIFT)) |
33352937Sralph 				PG_V | PG_RO;
33452118Smckusick 	if (pmap == &vmspace0.vm_pmap)
33552118Smckusick 		pmap->pm_tlbpid = 1;	/* preallocated in mach_init() */
33652118Smckusick 	else
33752118Smckusick 		pmap->pm_tlbpid = -1;	/* none allocated yet */
33852118Smckusick }
33952118Smckusick 
34052118Smckusick /*
34152118Smckusick  *	Retire the given physical map from service.
34252118Smckusick  *	Should only be called if the map contains
34352118Smckusick  *	no valid mappings.
34452118Smckusick  */
34552118Smckusick void
34652118Smckusick pmap_destroy(pmap)
34752118Smckusick 	register pmap_t pmap;
34852118Smckusick {
34952118Smckusick 	int count;
35052118Smckusick 
35152118Smckusick #ifdef DEBUG
35252118Smckusick 	if (pmapdebug & PDB_FOLLOW)
35352118Smckusick 		printf("pmap_destroy(%x)\n", pmap);
35452118Smckusick #endif
35552118Smckusick 	if (pmap == NULL)
35652118Smckusick 		return;
35752118Smckusick 
35852118Smckusick 	printf("pmap_destroy(%x) XXX\n", pmap); /* XXX */
35952118Smckusick 	simple_lock(&pmap->pm_lock);
36052118Smckusick 	count = --pmap->pm_count;
36152118Smckusick 	simple_unlock(&pmap->pm_lock);
36252118Smckusick 	if (count == 0) {
36352118Smckusick 		pmap_release(pmap);
36452118Smckusick 		free((caddr_t)pmap, M_VMPMAP);
36552118Smckusick 	}
36652118Smckusick }
36752118Smckusick 
36852118Smckusick /*
36952118Smckusick  * Release any resources held by the given physical map.
37052118Smckusick  * Called when a pmap initialized by pmap_pinit is being released.
37152118Smckusick  * Should only be called if the map contains no valid mappings.
37252118Smckusick  */
37352118Smckusick void
37452118Smckusick pmap_release(pmap)
37552118Smckusick 	register pmap_t pmap;
37652118Smckusick {
37752118Smckusick 	register int id;
37852118Smckusick #ifdef DIAGNOSTIC
37952118Smckusick 	register int i;
38052118Smckusick #endif
38152118Smckusick 
38252118Smckusick #ifdef DEBUG
38352118Smckusick 	if (pmapdebug & PDB_FOLLOW)
38452118Smckusick 		printf("pmap_release(%x)\n", pmap);
38552118Smckusick #endif
38652118Smckusick 
38752118Smckusick 	if (pmap->pm_hash && pmap->pm_hash != zero_pmap_hash) {
38852118Smckusick 		kmem_free(kernel_map, (vm_offset_t)pmap->pm_hash,
38952118Smckusick 			PMAP_HASH_SIZE);
39052118Smckusick 		pmap->pm_hash = zero_pmap_hash;
39152118Smckusick 	}
39252118Smckusick 	if ((id = pmap->pm_tlbpid) < 0)
39352118Smckusick 		return;
39452118Smckusick #ifdef DIAGNOSTIC
39552118Smckusick 	if (!(whichpids[id >> 5] & (1 << (id & 0x1F))))
39652118Smckusick 		panic("pmap_release: id free");
39752118Smckusick #endif
39852118Smckusick 	MachTLBFlushPID(id);
39952118Smckusick 	whichpids[id >> 5] &= ~(1 << (id & 0x1F));
40052118Smckusick 	pmap->pm_flags &= ~PM_MODIFIED;
40152118Smckusick 	pmap->pm_tlbpid = -1;
40252118Smckusick 	if (pmap == cur_pmap)
40352118Smckusick 		cur_pmap = (pmap_t)0;
40452118Smckusick #ifdef DIAGNOSTIC
40552118Smckusick 	/* invalidate user PTE cache */
40652118Smckusick 	for (i = 0; i < PMAP_HASH_UPAGES; i++)
40752118Smckusick 		MachTLBWriteIndexed(i + UPAGES, MACH_RESERVED_ADDR, 0);
40852118Smckusick #endif
40952118Smckusick }
41052118Smckusick 
41152118Smckusick /*
41252118Smckusick  *	Add a reference to the specified pmap.
41352118Smckusick  */
41452118Smckusick void
41552118Smckusick pmap_reference(pmap)
41652118Smckusick 	pmap_t pmap;
41752118Smckusick {
41852118Smckusick 
41952118Smckusick #ifdef DEBUG
42052118Smckusick 	if (pmapdebug & PDB_FOLLOW)
42152118Smckusick 		printf("pmap_reference(%x)\n", pmap);
42252118Smckusick #endif
42352118Smckusick 	if (pmap != NULL) {
42452118Smckusick 		simple_lock(&pmap->pm_lock);
42552118Smckusick 		pmap->pm_count++;
42652118Smckusick 		simple_unlock(&pmap->pm_lock);
42752118Smckusick 	}
42852118Smckusick }
42952118Smckusick 
43052118Smckusick /*
43152118Smckusick  *	Remove the given range of addresses from the specified map.
43252118Smckusick  *
43352118Smckusick  *	It is assumed that the start and end are properly
43452118Smckusick  *	rounded to the page size.
43552118Smckusick  */
43652118Smckusick void
43752118Smckusick pmap_remove(pmap, sva, eva)
43852118Smckusick 	register pmap_t pmap;
43952118Smckusick 	vm_offset_t sva, eva;
44052118Smckusick {
44152118Smckusick 	register vm_offset_t va;
44252118Smckusick 	register pv_entry_t pv, npv;
44353718Smckusick 	register int i;
44452118Smckusick 	pmap_hash_t hp;
44552118Smckusick 	unsigned entry;
44652118Smckusick 
44752118Smckusick #ifdef DEBUG
44852118Smckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
44952118Smckusick 		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
45052118Smckusick 	remove_stats.calls++;
45152118Smckusick #endif
45252118Smckusick 	if (pmap == NULL)
45352118Smckusick 		return;
45452118Smckusick 
45552118Smckusick 	/* anything in the cache? */
45652118Smckusick 	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
45752118Smckusick 		return;
45852118Smckusick 
45952118Smckusick 	if (!pmap->pm_hash) {
46052118Smckusick 		register pt_entry_t *pte;
46152118Smckusick 
46252118Smckusick 		/* remove entries from kernel pmap */
46352741Sralph #ifdef DIAGNOSTIC
46452741Sralph 		if (sva < VM_MIN_KERNEL_ADDRESS ||
46552741Sralph 		    eva > VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
46652741Sralph 			panic("pmap_remove");
46752741Sralph #endif
46852118Smckusick 		pte = kvtopte(sva);
46952118Smckusick 		for (va = sva; va < eva; va += NBPG, pte++) {
47052118Smckusick 			entry = pte->pt_entry;
47152118Smckusick 			if (!(entry & PG_V))
47252118Smckusick 				continue;
47352118Smckusick 			if (entry & PG_WIRED)
47452118Smckusick 				pmap->pm_stats.wired_count--;
47552118Smckusick 			pmap->pm_stats.resident_count--;
47652118Smckusick 			pmap_remove_pv(pmap, va, entry & PG_FRAME);
47752118Smckusick #ifdef ATTR
47852118Smckusick 			pmap_attributes[atop(entry - KERNBASE)] = 0;
47952118Smckusick #endif
48052118Smckusick 			pte->pt_entry = PG_NV;
48152118Smckusick 			/*
48252118Smckusick 			 * Flush the TLB for the given address.
48352118Smckusick 			 */
48452118Smckusick 			MachTLBFlushAddr(va);
48552118Smckusick #ifdef DEBUG
48652118Smckusick 			remove_stats.flushes++;
48752118Smckusick #endif
48852118Smckusick 		}
48952118Smckusick 		return;
49052118Smckusick 	}
49152118Smckusick 
49252118Smckusick 	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
49352118Smckusick 	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
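	/*
	 * From here on, virtual addresses carry the pmap's TLB PID
	 * (shifted into the TLB PID field), matching the tags stored in
	 * pmh_pte[].high, so a simple compare finds this pmap's entries.
	 */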
49452118Smckusick 	/*
49552118Smckusick 	 * If we are not in the current address space, just flush the
49652118Smckusick 	 * software cache and not the hardware.
49752118Smckusick 	 */
49852118Smckusick 	if (pmap != cur_pmap) {
49952118Smckusick 		for (; va < eva; va += NBPG) {
50052118Smckusick 			hp = &pmap->pm_hash[PMAP_HASH(va)];
50153718Smckusick 			if (hp->pmh_pte[0].high == va)
50253718Smckusick 				i = 0;
50353718Smckusick 			else if (hp->pmh_pte[1].high == va)
50453718Smckusick 				i = 1;
50553718Smckusick 			else
50652118Smckusick 				continue;
50752118Smckusick 
50853718Smckusick 			hp->pmh_pte[i].high = 0;
50953718Smckusick 			entry = hp->pmh_pte[i].low;
51052118Smckusick 			if (entry & PG_WIRED)
51152118Smckusick 				pmap->pm_stats.wired_count--;
51252118Smckusick 			pmap->pm_stats.resident_count--;
51352118Smckusick 			pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
51452118Smckusick #ifdef ATTR
51552118Smckusick 			pmap_attributes[atop(entry - KERNBASE)] = 0;
51652118Smckusick #endif
51752118Smckusick 			pmap->pm_flags |= PM_MODIFIED;
51852118Smckusick #ifdef DEBUG
51952118Smckusick 			remove_stats.removes++;
52052118Smckusick #endif
52152118Smckusick 		}
52252118Smckusick 		return;
52352118Smckusick 	}
52452118Smckusick 
52552118Smckusick 	for (; va < eva; va += NBPG) {
52652118Smckusick 		hp = &pmap->pm_hash[PMAP_HASH(va)];
52753718Smckusick 		if (hp->pmh_pte[0].high == va)
52853718Smckusick 			i = 0;
52953718Smckusick 		else if (hp->pmh_pte[1].high == va)
53053718Smckusick 			i = 1;
53153718Smckusick 		else
53252118Smckusick 			continue;
53352118Smckusick 
53453718Smckusick 		hp->pmh_pte[i].high = 0;
53553718Smckusick 		entry = hp->pmh_pte[i].low;
53652118Smckusick 		if (entry & PG_WIRED)
53752118Smckusick 			pmap->pm_stats.wired_count--;
53852118Smckusick 		pmap->pm_stats.resident_count--;
53952118Smckusick 		pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
54052118Smckusick #ifdef ATTR
54152118Smckusick 		pmap_attributes[atop(entry - KERNBASE)] = 0;
54252118Smckusick #endif
54352118Smckusick 		/*
54453718Smckusick 		 * Flush the TLB for the given address.
54553718Smckusick 		 */
54652118Smckusick 		MachTLBFlushAddr(va);
54752118Smckusick #ifdef DEBUG
54852118Smckusick 		remove_stats.flushes++;
54952118Smckusick #endif
55052118Smckusick 	}
55152118Smckusick }
55252118Smckusick 
55352118Smckusick /*
55452118Smckusick  *	pmap_page_protect:
55552118Smckusick  *
55652118Smckusick  *	Lower the permission for all mappings to a given page.
55752118Smckusick  */
55852118Smckusick void
55952118Smckusick pmap_page_protect(pa, prot)
56052118Smckusick 	vm_offset_t pa;
56152118Smckusick 	vm_prot_t prot;
56252118Smckusick {
56352118Smckusick 	register pv_entry_t pv;
56452118Smckusick 	register vm_offset_t va;
56552118Smckusick 	int s;
56652118Smckusick 
56752118Smckusick #ifdef DEBUG
56852118Smckusick 	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
56952118Smckusick 	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
57052118Smckusick 		printf("pmap_page_protect(%x, %x)\n", pa, prot);
57152118Smckusick #endif
57252118Smckusick 	if (!IS_VM_PHYSADDR(pa))
57352118Smckusick 		return;
57452118Smckusick 
57552118Smckusick 	switch (prot) {
57652118Smckusick 	case VM_PROT_ALL:
57752118Smckusick 		break;
57852118Smckusick 
57952118Smckusick 	/* copy_on_write */
58052118Smckusick 	case VM_PROT_READ:
58152118Smckusick 	case VM_PROT_READ|VM_PROT_EXECUTE:
58252118Smckusick 		pv = pa_to_pvh(pa);
58352118Smckusick 		s = splimp();
58452118Smckusick 		/*
58552118Smckusick 		 * Loop over all current mappings, setting/clearing as appropriate.
58652118Smckusick 		 */
58752118Smckusick 		if (pv->pv_pmap != NULL) {
58852118Smckusick 			for (; pv; pv = pv->pv_next) {
58952118Smckusick 				extern vm_offset_t pager_sva, pager_eva;
59052118Smckusick 				va = pv->pv_va;
59152118Smckusick 
59252118Smckusick 				/*
59352118Smckusick 				 * XXX don't write protect pager mappings
59452118Smckusick 				 */
59552118Smckusick 				if (va >= pager_sva && va < pager_eva)
59652118Smckusick 					continue;
59752118Smckusick 				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
59852118Smckusick 					prot);
59952118Smckusick 			}
60052118Smckusick 		}
60152118Smckusick 		splx(s);
60252118Smckusick 		break;
60352118Smckusick 
60452118Smckusick 	/* remove_all */
60552118Smckusick 	default:
60652118Smckusick 		pv = pa_to_pvh(pa);
60752118Smckusick 		s = splimp();
60852118Smckusick 		while (pv->pv_pmap != NULL) {
60952118Smckusick 			pmap_remove(pv->pv_pmap, pv->pv_va,
61052118Smckusick 				    pv->pv_va + PAGE_SIZE);
61152118Smckusick 		}
61252118Smckusick 		splx(s);
61352118Smckusick 	}
61452118Smckusick }
61552118Smckusick 
61652118Smckusick /*
61752118Smckusick  *	Set the physical protection on the
61852118Smckusick  *	specified range of this map as requested.
61952118Smckusick  */
62052118Smckusick void
62152118Smckusick pmap_protect(pmap, sva, eva, prot)
62252118Smckusick 	register pmap_t pmap;
62352118Smckusick 	vm_offset_t sva, eva;
62452118Smckusick 	vm_prot_t prot;
62552118Smckusick {
62652118Smckusick 	register vm_offset_t va;
62753718Smckusick 	register int i;
62852118Smckusick 	pmap_hash_t hp;
62952118Smckusick 	u_int p;
63052118Smckusick 
63152118Smckusick #ifdef DEBUG
63252118Smckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
63352118Smckusick 		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
63452118Smckusick #endif
63552118Smckusick 	if (pmap == NULL)
63652118Smckusick 		return;
63752118Smckusick 
63852118Smckusick 	/* anything in the software cache? */
63952118Smckusick 	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
64052118Smckusick 		return;
64152118Smckusick 
64252118Smckusick 	if (!(prot & VM_PROT_READ)) {
64352118Smckusick 		pmap_remove(pmap, sva, eva);
64452118Smckusick 		return;
64552118Smckusick 	}
64652118Smckusick 
64752118Smckusick 	if (!pmap->pm_hash) {
64852118Smckusick 		register pt_entry_t *pte;
64952118Smckusick 
65052118Smckusick 		/*
65152118Smckusick 		 * Change entries in kernel pmap.
65252118Smckusick 		 * This will trap if the page is writeable (in order to set
65352118Smckusick 		 * the dirty bit) even if the dirty bit is already set. The
65452118Smckusick 		 * optimization isn't worth the effort since this code isn't
65552118Smckusick 		 * executed much. The common case is to make a user page
65652118Smckusick 		 * read-only.
65752118Smckusick 		 */
65852741Sralph #ifdef DIAGNOSTIC
65952741Sralph 		if (sva < VM_MIN_KERNEL_ADDRESS ||
66052741Sralph 		    eva > VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
66152741Sralph 			panic("pmap_protect");
66252741Sralph #endif
66352118Smckusick 		p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
66452118Smckusick 		pte = kvtopte(sva);
66552118Smckusick 		for (va = sva; va < eva; va += NBPG, pte++) {
66652118Smckusick 			if (!(pte->pt_entry & PG_V))
66752118Smckusick 				continue;
66852118Smckusick 			pte->pt_entry = (pte->pt_entry & ~(PG_M | PG_RO)) | p;
66952118Smckusick 			/*
67052118Smckusick 			 * Update the TLB if the given address is in the cache.
67152118Smckusick 			 */
67252118Smckusick 			MachTLBUpdate(va, pte->pt_entry);
67352118Smckusick 		}
67452118Smckusick 		return;
67552118Smckusick 	}
67652118Smckusick 
67752118Smckusick 	p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
67852118Smckusick 	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
67952118Smckusick 	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
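	/* Tag the addresses with the TLB PID, as in pmap_remove() above. */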
68052118Smckusick 	/*
68152118Smckusick 	 * If we are not in the current address space, just flush the
68252118Smckusick 	 * software cache and not the hardware.
68352118Smckusick 	 */
68452118Smckusick 	if (pmap != cur_pmap) {
68552118Smckusick 		for (; va < eva; va += NBPG) {
68652118Smckusick 			hp = &pmap->pm_hash[PMAP_HASH(va)];
68753718Smckusick 			if (hp->pmh_pte[0].high == va)
68853718Smckusick 				i = 0;
68953718Smckusick 			else if (hp->pmh_pte[1].high == va)
69053718Smckusick 				i = 1;
69153718Smckusick 			else
69252118Smckusick 				continue;
69352118Smckusick 
69453718Smckusick 			hp->pmh_pte[i].low = (hp->pmh_pte[i].low & ~(PG_M | PG_RO)) | p;
69552118Smckusick 			pmap->pm_flags |= PM_MODIFIED;
69652118Smckusick 		}
69752118Smckusick 		return;
69852118Smckusick 	}
69952118Smckusick 
70052118Smckusick 	for (; va < eva; va += NBPG) {
70152118Smckusick 		hp = &pmap->pm_hash[PMAP_HASH(va)];
70253718Smckusick 		if (hp->pmh_pte[0].high == va)
70353718Smckusick 			i = 0;
70453718Smckusick 		else if (hp->pmh_pte[1].high == va)
70553718Smckusick 			i = 1;
70653718Smckusick 		else
70752118Smckusick 			continue;
70852118Smckusick 
70953718Smckusick 		hp->pmh_pte[i].low = (hp->pmh_pte[i].low & ~(PG_M | PG_RO)) | p;
71052118Smckusick 		/*
71153718Smckusick 		 * Update the TLB if the given address is in the cache.
71253718Smckusick 		 */
71353718Smckusick 		MachTLBUpdate(hp->pmh_pte[i].high, hp->pmh_pte[i].low);
71452118Smckusick 	}
71552118Smckusick }
71652118Smckusick 
71752118Smckusick /*
71852118Smckusick  *	Insert the given physical page (p) at
71952118Smckusick  *	the specified virtual address (v) in the
72052118Smckusick  *	target physical map with the protection requested.
72152118Smckusick  *
72252118Smckusick  *	If specified, the page will be wired down, meaning
72252118Smckusick  *	that the related pte cannot be reclaimed.
72452118Smckusick  *
72552118Smckusick  *	NB:  This is the only routine which MAY NOT lazy-evaluate
72652118Smckusick  *	or lose information.  That is, this routine must actually
72752118Smckusick  *	insert this page into the given map NOW.
72852118Smckusick  */
72952118Smckusick void
73052118Smckusick pmap_enter(pmap, va, pa, prot, wired)
73152118Smckusick 	register pmap_t pmap;
73252118Smckusick 	vm_offset_t va;
73352118Smckusick 	register vm_offset_t pa;
73452118Smckusick 	vm_prot_t prot;
73552118Smckusick 	boolean_t wired;
73652118Smckusick {
73752118Smckusick 	register pmap_hash_t hp;
73852118Smckusick 	register u_int npte;
73953718Smckusick 	register int i, j;
74053718Smckusick 	int newpos;
74152118Smckusick 
74252118Smckusick #ifdef DEBUG
74352118Smckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
74452118Smckusick 		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
74552118Smckusick 		       pmap, va, pa, prot, wired);
74652118Smckusick #endif
74752118Smckusick #ifdef DIAGNOSTIC
74852118Smckusick 	if (!pmap)
74952118Smckusick 		panic("pmap_enter: pmap");
75052118Smckusick 	if (pmap->pm_tlbpid < 0)
75152118Smckusick 		panic("pmap_enter: tlbpid");
75252937Sralph 	if (!pmap->pm_hash) {
75352118Smckusick 		enter_stats.kernel++;
75452937Sralph 		if (va < VM_MIN_KERNEL_ADDRESS ||
75552937Sralph 		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
75652118Smckusick 			panic("pmap_enter: kva");
75752118Smckusick 	} else {
75852118Smckusick 		enter_stats.user++;
75952118Smckusick 		if (va & 0x80000000)
76052118Smckusick 			panic("pmap_enter: uva");
76152118Smckusick 	}
76252741Sralph 	if (pa & 0x80000000)
76352741Sralph 		panic("pmap_enter: pa");
76452118Smckusick 	if (!(prot & VM_PROT_READ))
76552118Smckusick 		panic("pmap_enter: prot");
76652118Smckusick #endif
76752118Smckusick 
76852118Smckusick 	/*
76952118Smckusick 	 * See if we need to create a new TLB cache.
77052118Smckusick 	 */
77152118Smckusick 	if (pmap->pm_hash == zero_pmap_hash) {
77252118Smckusick 		register vm_offset_t kva;
77352118Smckusick 		register pt_entry_t *pte;
77452118Smckusick 
77552118Smckusick 		kva = kmem_alloc(kernel_map, PMAP_HASH_SIZE);
77652118Smckusick 		pmap->pm_hash = (pmap_hash_t)kva;
77752118Smckusick 
77852118Smckusick 		/*
77952118Smckusick 		 * Convert the kernel virtual address to a physical one
78052118Smckusick 		 * and cache it in the pmap. Note: if the phyical address
78152118Smckusick 		 * can change (due to memory compaction in kmem_alloc?),
78252118Smckusick 		 * we will have to update things.
78352118Smckusick 		 */
78452118Smckusick 		pte = kvtopte(kva);
78552118Smckusick 		for (i = 0; i < PMAP_HASH_UPAGES; i++) {
78652118Smckusick 			pmap->pm_hash_ptes[i] = pte->pt_entry & ~PG_G;
78752118Smckusick 			pte++;
78852118Smckusick 		}
78952118Smckusick 
79052118Smckusick 		/*
79152118Smckusick 		 * Map in new TLB cache if it is current.
79252118Smckusick 		 */
79352118Smckusick 		if (pmap == cur_pmap) {
79452118Smckusick 			for (i = 0; i < PMAP_HASH_UPAGES; i++) {
79552118Smckusick 				MachTLBWriteIndexed(i + UPAGES,
79652118Smckusick 					(PMAP_HASH_UADDR + (i << PGSHIFT)) |
79752118Smckusick 						(pmap->pm_tlbpid  <<
79852118Smckusick 						VMMACH_TLB_PID_SHIFT),
79952118Smckusick 					pmap->pm_hash_ptes[i]);
80052118Smckusick 			}
80152118Smckusick 		}
80252118Smckusick #ifdef DIAGNOSTIC
80352118Smckusick 		for (i = 0; i < PAGE_SIZE; i += sizeof(int), kva += sizeof(int))
80452118Smckusick 			if (*(int *)kva != 0)
80552118Smckusick 				panic("pmap_enter: *kva != 0");
80652118Smckusick #endif
80752118Smckusick 	}
80852118Smckusick 
80952118Smckusick 	if (IS_VM_PHYSADDR(pa)) {
81052118Smckusick 		register pv_entry_t pv, npv;
81152118Smckusick 		int s;
81252118Smckusick 
81352118Smckusick 		if (!(prot & VM_PROT_WRITE))
81452118Smckusick 			npte = PG_RO;
81552118Smckusick 		else {
81652118Smckusick 			register vm_page_t mem;
81752118Smckusick 
81852118Smckusick 			mem = PHYS_TO_VM_PAGE(pa);
81952118Smckusick 			if ((int)va < 0) {
82052118Smckusick 				/*
82152118Smckusick 				 * Don't bother to trap on kernel writes,
82252118Smckusick 				 * just record page as dirty.
82352118Smckusick 				 */
82452118Smckusick 				npte = PG_M;
825*56633Sralph 				mem->flags &= ~PG_CLEAN;
82652118Smckusick 			} else
82752118Smckusick #ifdef ATTR
82852118Smckusick 				if ((pmap_attributes[atop(pa - KERNBASE)] &
829*56633Sralph 				    PMAP_ATTR_MOD) || !(mem->flags & PG_CLEAN))
83052118Smckusick #else
831*56633Sralph 				if (!(mem->flags & PG_CLEAN))
83252118Smckusick #endif
83352118Smckusick 					npte = PG_M;
83452118Smckusick 			else
83552118Smckusick 				npte = 0;
83652118Smckusick 		}
83752118Smckusick 
83852118Smckusick #ifdef DEBUG
83952118Smckusick 		enter_stats.managed++;
84052118Smckusick #endif
84152118Smckusick 		/*
84252118Smckusick 		 * Enter the pmap and virtual address into the
84352118Smckusick 		 * physical to virtual map table.
84452118Smckusick 		 */
84552118Smckusick 		pv = pa_to_pvh(pa);
84652118Smckusick 		s = splimp();
84752118Smckusick #ifdef DEBUG
84852118Smckusick 		if (pmapdebug & PDB_ENTER)
84952118Smckusick 			printf("pmap_enter: pv %x: was %x/%x/%x\n",
85052118Smckusick 			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
85152118Smckusick #endif
85252118Smckusick 		if (pv->pv_pmap == NULL) {
85352118Smckusick 			/*
85452118Smckusick 			 * No entries yet, use header as the first entry
85552118Smckusick 			 */
85652118Smckusick #ifdef DEBUG
85752118Smckusick 			enter_stats.firstpv++;
85852118Smckusick #endif
85952118Smckusick 			pv->pv_va = va;
86052118Smckusick 			pv->pv_pmap = pmap;
86152118Smckusick 			pv->pv_next = NULL;
86252118Smckusick 		} else {
86352118Smckusick 			/*
86452118Smckusick 			 * There is at least one other VA mapping this page.
86552118Smckusick 			 * Place this entry after the header.
86652118Smckusick 			 *
86752118Smckusick 			 * Note: the entry may already be in the table if
86852118Smckusick 			 * we are only changing the protection bits.
86952118Smckusick 			 */
87052118Smckusick 			for (npv = pv; npv; npv = npv->pv_next)
87152118Smckusick 				if (pmap == npv->pv_pmap && va == npv->pv_va) {
87252118Smckusick #ifdef DIAGNOSTIC
87352118Smckusick 				    if (!pmap->pm_hash) {
87452118Smckusick 					unsigned entry;
87552118Smckusick 
87652118Smckusick 					entry = kvtopte(va)->pt_entry;
87752118Smckusick 					if (!(entry & PG_V) ||
87852118Smckusick 					    (entry & PG_FRAME) != pa)
87952118Smckusick 			printf("found kva %x pa %x in pv_table but != %x\n",
88052118Smckusick 				va, pa, entry);
88152118Smckusick 				    } else {
88252118Smckusick 					hp = &pmap->pm_hash[PMAP_HASH(va)];
88353718Smckusick 					if ((hp->pmh_pte[0].high == (va |
88453718Smckusick 					    (pmap->pm_tlbpid <<
88553718Smckusick 					    VMMACH_TLB_PID_SHIFT)) &&
88653718Smckusick 					    (hp->pmh_pte[0].low & PG_FRAME) == pa) ||
88753718Smckusick 					    (hp->pmh_pte[1].high == (va |
88853718Smckusick 					    (pmap->pm_tlbpid <<
88953718Smckusick 					    VMMACH_TLB_PID_SHIFT)) &&
89053718Smckusick 					    (hp->pmh_pte[1].low & PG_FRAME) == pa))
89153718Smckusick 						goto fnd;
89253718Smckusick 			printf("found va %x pa %x in pv_table but !=\n",
89353718Smckusick 				va, pa);
89452118Smckusick 				    }
89552118Smckusick #endif
89652118Smckusick 					goto fnd;
89752118Smckusick 				}
89852118Smckusick 			/* can this cause us to recurse forever? */
89952118Smckusick 			npv = (pv_entry_t)
90052118Smckusick 				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
90152118Smckusick 			npv->pv_va = va;
90252118Smckusick 			npv->pv_pmap = pmap;
90352118Smckusick 			npv->pv_next = pv->pv_next;
90452118Smckusick 			pv->pv_next = npv;
90552118Smckusick #ifdef DEBUG
90652118Smckusick 			if (!npv->pv_next)
90752118Smckusick 				enter_stats.secondpv++;
90852118Smckusick #endif
90952118Smckusick 		fnd:
91052118Smckusick 			;
91152118Smckusick 		}
91252118Smckusick 		splx(s);
91352118Smckusick 	} else {
91452118Smckusick 		/*
91552118Smckusick 		 * Assumption: if it is not part of our managed memory
91652118Smckusick 		 * then it must be device memory, which may be volatile.
91752118Smckusick 		 */
91852118Smckusick #ifdef DEBUG
91952118Smckusick 		enter_stats.unmanaged++;
92052118Smckusick #endif
92152118Smckusick 		printf("pmap_enter: UNMANAGED ADDRESS va %x pa %x\n",
92252118Smckusick 			va, pa); /* XXX */
92352118Smckusick 		npte = (prot & VM_PROT_WRITE) ? PG_M : PG_RO;
92452118Smckusick 	}
92552118Smckusick 
92652741Sralph 	/*
92752741Sralph 	 * The only time we need to flush the cache is if we
92852741Sralph 	 * execute from a physical address and then change the data.
92952741Sralph 	 * This is the best place to do this.
93052741Sralph 	 * pmap_protect() and pmap_remove() are mostly used to switch
93152741Sralph 	 * between R/W and R/O pages.
93252741Sralph 	 * NOTE: we only support cache flush for read only text.
93352741Sralph 	 */
93452741Sralph 	if (prot == (VM_PROT_READ | VM_PROT_EXECUTE))
93553611Sralph 		MachFlushICache(MACH_PHYS_TO_CACHED(pa), PAGE_SIZE);
93652741Sralph 
93752118Smckusick 	if (!pmap->pm_hash) {
93852118Smckusick 		register pt_entry_t *pte;
93952118Smckusick 
94052118Smckusick 		/* enter entries into kernel pmap */
94152118Smckusick 		pte = kvtopte(va);
94252118Smckusick 		npte |= pa | PG_V | PG_G;
94352118Smckusick 		if (wired) {
94452118Smckusick 			pmap->pm_stats.wired_count += pmaxpagesperpage;
94552118Smckusick 			npte |= PG_WIRED;
94652118Smckusick 		}
94752118Smckusick 		i = pmaxpagesperpage;
94852118Smckusick 		do {
94952118Smckusick 			if (!(pte->pt_entry & PG_V)) {
95052118Smckusick 				pmap->pm_stats.resident_count++;
95152118Smckusick 				MachTLBWriteRandom(va, npte);
95252118Smckusick 			} else {
95352937Sralph #ifdef DIAGNOSTIC
95452937Sralph 				if (pte->pt_entry & PG_WIRED)
95552937Sralph 					panic("pmap_enter: kernel wired");
95652937Sralph #endif
95752118Smckusick 				/*
95852118Smckusick 				 * Update the same virtual address entry.
95952118Smckusick 				 */
96052118Smckusick 				MachTLBUpdate(va, npte);
96152937Sralph 				printf("TLB update kva %x pte %x -> %x\n",
96252937Sralph 					va, pte->pt_entry, npte); /* XXX */
96352118Smckusick 			}
96452118Smckusick 			pte->pt_entry = npte;
96552118Smckusick 			va += NBPG;
96652118Smckusick 			npte += NBPG;
96752118Smckusick 			pte++;
96852118Smckusick 		} while (--i != 0);
96952118Smckusick 		return;
97052118Smckusick 	}
97152118Smckusick 
97252118Smckusick 	/*
97352118Smckusick 	 * Now validate mapping with desired protection/wiring.
97452118Smckusick 	 * Assume uniform modified and referenced status for all
97552118Smckusick 	 * PMAX pages in a MACH page.
97652118Smckusick 	 */
97752118Smckusick 	npte |= pa | PG_V;
97852118Smckusick 	if (wired) {
97952118Smckusick 		pmap->pm_stats.wired_count += pmaxpagesperpage;
98052118Smckusick 		npte |= PG_WIRED;
98152118Smckusick 	}
98252118Smckusick #ifdef DEBUG
98352118Smckusick 	if (pmapdebug & PDB_ENTER)
98452118Smckusick 		printf("pmap_enter: new pte value %x\n", npte);
98552118Smckusick #endif
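	/*
	 * Each hash bucket holds two PTE slots, making the loop below a
	 * two-way set-associative lookup: a matching slot is updated in
	 * place, an empty slot is filled, and otherwise the (non-wired)
	 * entry in slot 1 is evicted, slot 0 slides down to slot 1, and
	 * the new translation goes into slot 0, so slot 0 always holds
	 * the more recently entered mapping.
	 */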
98652118Smckusick 	va |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
98752118Smckusick 	i = pmaxpagesperpage;
98852118Smckusick 	do {
98952118Smckusick 		hp = &pmap->pm_hash[PMAP_HASH(va)];
99053718Smckusick 		if (hp->pmh_pte[0].high == va &&
99153718Smckusick 		    (hp->pmh_pte[0].low & PG_FRAME) == (npte & PG_FRAME))
99253718Smckusick 			j = 0;
99353718Smckusick 		else if (hp->pmh_pte[1].high == va &&
99453718Smckusick 		    (hp->pmh_pte[1].low & PG_FRAME) == (npte & PG_FRAME))
99553718Smckusick 			j = 1;
99653718Smckusick 		else
99753718Smckusick 			j = -1;
99853718Smckusick 		if (j >= 0) {
99953718Smckusick #ifdef DEBUG
100053718Smckusick 			enter_stats.cachehit++;
100153718Smckusick #endif
100253718Smckusick 			if (!(hp->pmh_pte[j].low & PG_WIRED)) {
100353718Smckusick 				/*
100453718Smckusick 				 * Update the same entry.
100553718Smckusick 				 */
100653718Smckusick 				hp->pmh_pte[j].low = npte;
100753718Smckusick 				MachTLBUpdate(va, npte);
100853718Smckusick 			} else {
100953718Smckusick 				/*
101053718Smckusick 				 * Don't replace wired entries, just update
101153718Smckusick 				 * the hardware TLB.
101253718Smckusick 				 * Bug: routines to flush the TLB won't know
101353718Smckusick 				 * that the entry is in the hardware.
101453718Smckusick 				 */
101553718Smckusick 				printf("pmap_enter: wired va %x %x\n", va,
101653718Smckusick 					hp->pmh_pte[j].low); /* XXX */
101753718Smckusick 				panic("pmap_enter: wired"); /* XXX */
101853718Smckusick 				MachTLBWriteRandom(va, npte);
101953718Smckusick 			}
102053718Smckusick 			goto next;
102153718Smckusick 		}
102253718Smckusick 		if (!hp->pmh_pte[0].high)
102353718Smckusick 			j = 0;
102453718Smckusick 		else if (!hp->pmh_pte[1].high)
102553718Smckusick 			j = 1;
102653718Smckusick 		else
102753718Smckusick 			j = -1;
102853718Smckusick 		if (j >= 0) {
102952118Smckusick 			pmap->pm_stats.resident_count++;
103053718Smckusick 			hp->pmh_pte[j].high = va;
103153718Smckusick 			hp->pmh_pte[j].low = npte;
103252118Smckusick 			MachTLBWriteRandom(va, npte);
103352118Smckusick 		} else {
103452118Smckusick #ifdef DEBUG
103552118Smckusick 			enter_stats.cachehit++;
103652118Smckusick #endif
103753718Smckusick 			if (!(hp->pmh_pte[1].low & PG_WIRED)) {
103853718Smckusick 				MachTLBFlushAddr(hp->pmh_pte[1].high);
103953718Smckusick 				pmap_remove_pv(pmap,
104053718Smckusick 					hp->pmh_pte[1].high & PG_FRAME,
104153718Smckusick 					hp->pmh_pte[1].low & PG_FRAME);
104253718Smckusick 				hp->pmh_pte[1] = hp->pmh_pte[0];
104353718Smckusick 				hp->pmh_pte[0].high = va;
104453718Smckusick 				hp->pmh_pte[0].low = npte;
104553718Smckusick 				MachTLBWriteRandom(va, npte);
104652118Smckusick 			} else {
104752118Smckusick 				/*
104852118Smckusick 				 * Don't replace wired entries, just update
104952118Smckusick 				 * the hardware TLB.
105052118Smckusick 				 * Bug: routines to flush the TLB won't know
105152118Smckusick 				 * that the entry is in the hardware.
105252118Smckusick 				 */
105352118Smckusick 				printf("pmap_enter: wired va %x %x\n", va,
105453718Smckusick 					hp->pmh_pte[1].low); /* XXX */
105552118Smckusick 				panic("pmap_enter: wired"); /* XXX */
105652118Smckusick 				MachTLBWriteRandom(va, npte);
105752118Smckusick 			}
105852118Smckusick 		}
105953718Smckusick next:
106052118Smckusick 		va += NBPG;
106152118Smckusick 		npte += NBPG;
106252118Smckusick 	} while (--i != 0);
106352118Smckusick }
106452118Smckusick 
106552118Smckusick /*
106652118Smckusick  *	Routine:	pmap_change_wiring
106752118Smckusick  *	Function:	Change the wiring attribute for a map/virtual-address
106852118Smckusick  *			pair.
106952118Smckusick  *	In/out conditions:
107052118Smckusick  *			The mapping must already exist in the pmap.
107152118Smckusick  */
107252118Smckusick void
107352118Smckusick pmap_change_wiring(pmap, va, wired)
107452118Smckusick 	register pmap_t	pmap;
107552118Smckusick 	vm_offset_t va;
107652118Smckusick 	boolean_t wired;
107752118Smckusick {
107852118Smckusick 	register pmap_hash_t hp;
107952118Smckusick 	u_int p;
108053718Smckusick 	register int i, j;
108152118Smckusick 
108252118Smckusick #ifdef DEBUG
108352118Smckusick 	if (pmapdebug & PDB_FOLLOW)
108452118Smckusick 		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
108552118Smckusick #endif
108652118Smckusick 	if (pmap == NULL)
108752118Smckusick 		return;
108852118Smckusick 
108952118Smckusick 	p = wired ? PG_WIRED : 0;
109052118Smckusick 
109152118Smckusick 	/*
109252118Smckusick 	 * Don't need to flush the TLB since PG_WIRED is only in software.
109352118Smckusick 	 */
109452118Smckusick 	if (!pmap->pm_hash) {
109552118Smckusick 		register pt_entry_t *pte;
109652118Smckusick 
109752118Smckusick 		/* change entries in kernel pmap */
109852741Sralph #ifdef DIAGNOSTIC
109952741Sralph 		if (va < VM_MIN_KERNEL_ADDRESS ||
110052741Sralph 		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
110152741Sralph 			panic("pmap_change_wiring");
110252741Sralph #endif
110352118Smckusick 		pte = kvtopte(va);
110452118Smckusick 		i = pmaxpagesperpage;
110552118Smckusick 		if (!(pte->pt_entry & PG_WIRED) && p)
110652118Smckusick 			pmap->pm_stats.wired_count += i;
110752118Smckusick 		else if ((pte->pt_entry & PG_WIRED) && !p)
110852118Smckusick 			pmap->pm_stats.wired_count -= i;
110952118Smckusick 		do {
111052118Smckusick 			if (!(pte->pt_entry & PG_V))
111152118Smckusick 				continue;
111252118Smckusick 			pte->pt_entry = (pte->pt_entry & ~PG_WIRED) | p;
111352118Smckusick 			pte++;
111452118Smckusick 		} while (--i != 0);
111552118Smckusick 	} else if (pmap->pm_tlbpid >= 0 && pmap->pm_hash != zero_pmap_hash) {
111652118Smckusick 		i = pmaxpagesperpage;
111753718Smckusick 		va = (va & PG_FRAME) | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
111852118Smckusick 		do {
111952118Smckusick 			hp = &pmap->pm_hash[PMAP_HASH(va)];
112053718Smckusick 			if (hp->pmh_pte[0].high == va)
112153718Smckusick 				j = 0;
112253718Smckusick 			else if (hp->pmh_pte[1].high == va)
112353718Smckusick 				j = 1;
112453718Smckusick 			else {
112553718Smckusick 				va += NBPG;
112652118Smckusick 				continue;
112753718Smckusick 			}
112853718Smckusick 			if (!(hp->pmh_pte[j].low & PG_WIRED) && p)
112952118Smckusick 				pmap->pm_stats.wired_count++;
113053718Smckusick 			else if ((hp->pmh_pte[j].low & PG_WIRED) && !p)
113152118Smckusick 				pmap->pm_stats.wired_count--;
113253718Smckusick 			hp->pmh_pte[j].low = (hp->pmh_pte[j].low & ~PG_WIRED) | p;
113352118Smckusick 			va += NBPG;
113452118Smckusick 		} while (--i != 0);
113552118Smckusick 	}
113652118Smckusick }
113752118Smckusick 
113852118Smckusick /*
113952118Smckusick  *	Routine:	pmap_extract
114052118Smckusick  *	Function:
114152118Smckusick  *		Extract the physical page address associated
114252118Smckusick  *		with the given map/virtual_address pair.
114352118Smckusick  */
114452118Smckusick vm_offset_t
114552118Smckusick pmap_extract(pmap, va)
114652118Smckusick 	register pmap_t	pmap;
114752118Smckusick 	vm_offset_t va;
114852118Smckusick {
114952118Smckusick 	register vm_offset_t pa;
115052118Smckusick 	register pmap_hash_t hp;
115153718Smckusick 	register int i;
115252118Smckusick 
115352118Smckusick #ifdef DEBUG
115452118Smckusick 	if (pmapdebug & PDB_FOLLOW)
115552118Smckusick 		printf("pmap_extract(%x, %x) -> ", pmap, va);
115652118Smckusick #endif
115752118Smckusick 
115852741Sralph 	if (!pmap->pm_hash) {
115952741Sralph #ifdef DIAGNOSTIC
116052741Sralph 		if (va < VM_MIN_KERNEL_ADDRESS ||
116152741Sralph 		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
116252741Sralph 			panic("pmap_extract");
116352741Sralph #endif
116452118Smckusick 		pa = kvtopte(va)->pt_entry & PG_FRAME;
116552741Sralph 	} else if (pmap->pm_tlbpid >= 0) {
116652118Smckusick 		hp = &pmap->pm_hash[PMAP_HASH(va)];
116753718Smckusick 		va = (va & PG_FRAME) | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
116853718Smckusick 		if (hp->pmh_pte[0].high == va)
116953718Smckusick 			pa = hp->pmh_pte[0].low & PG_FRAME;
117053718Smckusick 		else if (hp->pmh_pte[1].high == va)
117153718Smckusick 			pa = hp->pmh_pte[1].low & PG_FRAME;
117252118Smckusick 		else
117352118Smckusick 			pa = 0;
117452118Smckusick 	} else
117552118Smckusick 		pa = 0;
117652118Smckusick 
117752118Smckusick #ifdef DEBUG
117852118Smckusick 	if (pmapdebug & PDB_FOLLOW)
117952118Smckusick 		printf("%x\n", pa);
118052118Smckusick #endif
118152741Sralph 	return (pa);
118252118Smckusick }
118352118Smckusick 
118452118Smckusick /*
118552118Smckusick  *	Copy the range specified by src_addr/len
118652118Smckusick  *	from the source map to the range dst_addr/len
118752118Smckusick  *	in the destination map.
118852118Smckusick  *
118952118Smckusick  *	This routine is only advisory and need not do anything.
119052118Smckusick  */
119152741Sralph void
119252741Sralph pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
119352118Smckusick 	pmap_t dst_pmap;
119452118Smckusick 	pmap_t src_pmap;
119552118Smckusick 	vm_offset_t dst_addr;
119652118Smckusick 	vm_size_t len;
119752118Smckusick 	vm_offset_t src_addr;
119852118Smckusick {
119952118Smckusick 
120052118Smckusick #ifdef DEBUG
120152118Smckusick 	if (pmapdebug & PDB_FOLLOW)
120252118Smckusick 		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
120352118Smckusick 		       dst_pmap, src_pmap, dst_addr, len, src_addr);
120452118Smckusick #endif
120552118Smckusick }
120652118Smckusick 
120752118Smckusick /*
120852118Smckusick  *	Require that all active physical maps contain no
120952118Smckusick  *	incorrect entries NOW.  [This update includes
121052118Smckusick  *	forcing updates of any address map caching.]
121152118Smckusick  *
121252118Smckusick  *	Generally used to ensure that a thread about
121352118Smckusick  *	to run will see a semantically correct world.
121452118Smckusick  */
121552741Sralph void
121652741Sralph pmap_update()
121752118Smckusick {
121852118Smckusick 
121952118Smckusick #ifdef DEBUG
122052118Smckusick 	if (pmapdebug & PDB_FOLLOW)
122152118Smckusick 		printf("pmap_update()\n");
122252118Smckusick #endif
122352118Smckusick }
122452118Smckusick 
122552118Smckusick /*
122652118Smckusick  *	Routine:	pmap_collect
122752118Smckusick  *	Function:
122852118Smckusick  *		Garbage collects the physical map system for
122952118Smckusick  *		pages which are no longer used.
123052118Smckusick  *		Success need not be guaranteed -- that is, there
123152118Smckusick  *		may well be pages which are not referenced, but
123252118Smckusick  *		others may be collected.
123352118Smckusick  *	Usage:
123452118Smckusick  *		Called by the pageout daemon when pages are scarce.
123552118Smckusick  */
123652118Smckusick void
123752118Smckusick pmap_collect(pmap)
123852118Smckusick 	pmap_t pmap;
123952118Smckusick {
124052118Smckusick 
124152118Smckusick #ifdef DEBUG
124252118Smckusick 	if (pmapdebug & PDB_FOLLOW)
124352118Smckusick 		printf("pmap_collect(%x)\n", pmap);
124452118Smckusick #endif
124552118Smckusick }
124652118Smckusick 
124752118Smckusick /*
124852118Smckusick  *	pmap_zero_page zeros the specified (machine independent)
124952118Smckusick  *	page.
125052118Smckusick  */
125152118Smckusick void
125252118Smckusick pmap_zero_page(phys)
125352741Sralph 	vm_offset_t phys;
125452118Smckusick {
125552741Sralph 	register int *p, *end;
125652118Smckusick 
125752118Smckusick #ifdef DEBUG
125852118Smckusick 	if (pmapdebug & PDB_FOLLOW)
125952118Smckusick 		printf("pmap_zero_page(%x)\n", phys);
126052118Smckusick #endif
126152741Sralph 	p = (int *)MACH_PHYS_TO_CACHED(phys);
126252741Sralph 	end = p + PAGE_SIZE / sizeof(int);
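	/*
	 * Zero four words per iteration through the cached (KSEG0) alias
	 * of the page; this assumes PAGE_SIZE is a multiple of 16 bytes.
	 */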
126352118Smckusick 	do {
126452741Sralph 		p[0] = 0;
126552741Sralph 		p[1] = 0;
126652741Sralph 		p[2] = 0;
126752741Sralph 		p[3] = 0;
126852741Sralph 		p += 4;
126952741Sralph 	} while (p != end);
127052118Smckusick }
127152118Smckusick 
127252118Smckusick /*
127352118Smckusick  *	pmap_copy_page copies the specified (machine independent)
127452118Smckusick  *	page.
127552118Smckusick  */
127652118Smckusick void
127752118Smckusick pmap_copy_page(src, dst)
127852741Sralph 	vm_offset_t src, dst;
127952118Smckusick {
128052741Sralph 	register int *s, *d, *end;
128152741Sralph 	register int tmp0, tmp1, tmp2, tmp3;
128252118Smckusick 
128352118Smckusick #ifdef DEBUG
128452118Smckusick 	if (pmapdebug & PDB_FOLLOW)
128552118Smckusick 		printf("pmap_copy_page(%x, %x)\n", src, dst);
128652118Smckusick #endif
128752741Sralph 	s = (int *)MACH_PHYS_TO_CACHED(src);
128852741Sralph 	d = (int *)MACH_PHYS_TO_CACHED(dst);
128952741Sralph 	end = s + PAGE_SIZE / sizeof(int);
129052118Smckusick 	do {
129152741Sralph 		tmp0 = s[0];
129252741Sralph 		tmp1 = s[1];
129352741Sralph 		tmp2 = s[2];
129452741Sralph 		tmp3 = s[3];
129552741Sralph 		d[0] = tmp0;
129652741Sralph 		d[1] = tmp1;
129752741Sralph 		d[2] = tmp2;
129852741Sralph 		d[3] = tmp3;
129952741Sralph 		s += 4;
130052741Sralph 		d += 4;
130152741Sralph 	} while (s != end);
130252118Smckusick }
130352118Smckusick 
130452118Smckusick /*
130552118Smckusick  *	Routine:	pmap_pageable
130652118Smckusick  *	Function:
130752118Smckusick  *		Make the specified pages (by pmap, offset)
130852118Smckusick  *		pageable (or not) as requested.
130952118Smckusick  *
131052118Smckusick  *		A page which is not pageable may not take
131152118Smckusick  *		a fault; therefore, its page table entry
131252118Smckusick  *		must remain valid for the duration.
131352118Smckusick  *
131452118Smckusick  *		This routine is merely advisory; pmap_enter
131552118Smckusick  *		will specify that these pages are to be wired
131652118Smckusick  *		down (or not) as appropriate.
131752118Smckusick  */
131852118Smckusick void
131952118Smckusick pmap_pageable(pmap, sva, eva, pageable)
132052118Smckusick 	pmap_t		pmap;
132152118Smckusick 	vm_offset_t	sva, eva;
132252118Smckusick 	boolean_t	pageable;
132352118Smckusick {
132452118Smckusick 
132552118Smckusick #ifdef DEBUG
132652118Smckusick 	if (pmapdebug & PDB_FOLLOW)
132752118Smckusick 		printf("pmap_pageable(%x, %x, %x, %x)\n",
132852118Smckusick 		       pmap, sva, eva, pageable);
132952118Smckusick #endif
133052118Smckusick }
133152118Smckusick 
133252118Smckusick /*
133352118Smckusick  *	Clear the modify bits on the specified physical page.
133452118Smckusick  */
133552118Smckusick void
133652118Smckusick pmap_clear_modify(pa)
133752118Smckusick 	vm_offset_t pa;
133852118Smckusick {
133952118Smckusick 	pmap_hash_t hp;
134052118Smckusick 
134152118Smckusick #ifdef DEBUG
134252118Smckusick 	if (pmapdebug & PDB_FOLLOW)
134352118Smckusick 		printf("pmap_clear_modify(%x)\n", pa);
134452118Smckusick #endif
#ifdef ATTR
	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_MOD;
#endif
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_REF;
#endif
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_REF);
#else
	return (FALSE);
#endif
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_MOD);
#else
	return (FALSE);
#endif
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_phys_address(%x)\n", ppn);
#endif
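	/*
	 * Presumably meant to convert a page number (as handed back
	 * by device mmap routines) to a physical address, but nothing
	 * on the pmax calls it yet; hence the panic below.
	 */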
	panic("pmap_phys_address"); /* XXX */
	return (pmax_ptob(ppn));
}

/*
 * Miscellaneous support routines
 */

/*
 * Allocate a hardware PID and return it.
 * Also, change the hardwired TLB entries to point to this process's
 * TLB cache.
 * This is called by swtch().
 */
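/*
 * A minimal sketch of the expected call site, assuming the usual
 * context-switch path; the routine that loads the PID into the TLB
 * entry-hi register is given an illustrative name here, not one
 * taken from this file:
 *
 *	id = pmap_alloc_tlbpid(p);
 *	MachSetPID(id);
 *
 * After this, TLB lookups for the new process match only entries
 * tagged with its PID.
 */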
int
pmap_alloc_tlbpid(p)
	register struct proc *p;
{
	register pmap_t pmap;
	register u_int i;
	register int id;

	pmap = &p->p_vmspace->vm_pmap;
	if ((id = pmap->pm_tlbpid) >= 0) {
		if (pmap->pm_flags & PM_MODIFIED) {
			pmap->pm_flags &= ~PM_MODIFIED;
			MachTLBFlushPID(id);
		}
		goto done;
	}

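	/*
	 * whichpids[] is a two-word bitmap covering the 64 hardware
	 * TLB PIDs; pick a word with a free bit here and locate the
	 * bit itself below.
	 */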
	if ((i = whichpids[0]) != 0xFFFFFFFF)
		id = 0;
	else if ((i = whichpids[1]) != 0xFFFFFFFF)
		id = 32;
	else {
		register struct proc *q, *victim;
		register pmap_t q_pmap;

		/*
		 * Have to find a tlbpid to recycle.  Prefer a process
		 * that is not runnable, but fall back on the last
		 * process seen holding a tlbpid so that q, q_pmap,
		 * and id always describe the same victim.
		 * There is probably a better way to do this.
		 */
		victim = NULL;
		for (q = (struct proc *)allproc; q != NULL; q = q->p_nxt) {
			if (q->p_vmspace->vm_pmap.pm_tlbpid < 0)
				continue;
			victim = q;
			if (q->p_stat != SRUN)
				break;
		}
		if (victim == NULL)
			panic("TLBPidAlloc");
		q = victim;
		q_pmap = &q->p_vmspace->vm_pmap;
		id = q_pmap->pm_tlbpid;
		printf("pmap_alloc_tlbpid: recycle pid %d (%s) tlbpid %d\n",
			q->p_pid, q->p_comm, id); /* XXX */
		/*
		 * Even though the virtual to physical mapping hasn't changed,
		 * we need to clear the PID tag in the high word of each
		 * cache entry.
		 */
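		/*
		 * Each hash bucket holds two PTE slots (pmh_pte[0] and
		 * pmh_pte[1]); purge any valid user mapping from both.
		 */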
		if (q_pmap->pm_hash != zero_pmap_hash) {
			register pmap_hash_t hp;
			register int j;

			hp = q_pmap->pm_hash;
			for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
			    for (j = 0; j < 2; j++) {
				if (!hp->pmh_pte[j].high)
					continue;

				if (hp->pmh_pte[j].low & PG_WIRED) {
					printf("Clearing wired user entry! h %x l %x\n",
						hp->pmh_pte[j].high, hp->pmh_pte[j].low);
					panic("pmap_alloc_tlbpid: wired");
				}
				pmap_remove_pv(q_pmap,
					hp->pmh_pte[j].high & PG_FRAME,
					hp->pmh_pte[j].low & PG_FRAME);
				hp->pmh_pte[j].high = 0;
				q_pmap->pm_stats.resident_count--;
			    }
			}
		}
		q_pmap->pm_tlbpid = -1;
		MachTLBFlushPID(id);
#ifdef DEBUG
		remove_stats.pidflushes++;
#endif
		pmap->pm_tlbpid = id;
		goto done;
	}
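	/*
	 * i holds a whichpids[] word with at least one clear bit;
	 * shift until the first zero bit is found.  id already
	 * indexes the word's first bit (0 or 32).
	 */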
	while (i & 1) {
		i >>= 1;
		id++;
	}
	whichpids[id >> 5] |= 1 << (id & 0x1F);
	pmap->pm_tlbpid = id;
done:
	/*
	 * Map in new TLB cache.
	 */
	if (pmap == cur_pmap)
		return (id);
	cur_pmap = pmap;
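	/*
	 * Rewrite the reserved TLB slots just above the upage slots so
	 * that this pmap's TLB (hash) cache appears at PMAP_HASH_UADDR,
	 * tagged with the new PID.
	 */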
	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
		MachTLBWriteIndexed(i + UPAGES,
			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
				(id << VMMACH_TLB_PID_SHIFT),
			pmap->pm_hash_ptes[i]);
	}
	return (id);
}

/*
 * Remove a physical-to-virtual address translation.
 */
void
pmap_remove_pv(pmap, va, pa)
	pmap_t pmap;
	vm_offset_t va, pa;
{
	register pv_entry_t pv, npv;
	int s;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
#endif
	/*
	 * Remove page from the PV table (raise IPL since we
	 * may be called at interrupt time).
	 */
	if (!IS_VM_PHYSADDR(pa))
		return;
	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			free((caddr_t)npv, M_VMPVENT);
		} else
			pv->pv_pmap = NULL;
#ifdef DEBUG
		remove_stats.pvfirst++;
#endif
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
#ifdef DEBUG
			remove_stats.pvsearch++;
#endif
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				goto fnd;
		}
		/*
		 * The entry must be on the list or the unlink below
		 * would dereference a nil pointer, so a miss is fatal
		 * even without DIAGNOSTIC.
		 */
#ifdef DIAGNOSTIC
		printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa);
#endif
		panic("pmap_remove_pv");
	fnd:
		pv->pv_next = npv->pv_next;
		free((caddr_t)npv, M_VMPVENT);
	}
	splx(s);
}

#ifdef DEBUG
pmap_print(pmap)
	pmap_t pmap;
{
	register pmap_hash_t hp;
	register int i, j;

	printf("\tpmap_print(%x)\n", pmap);

	if (pmap->pm_hash == zero_pmap_hash) {
		printf("pm_hash == zero\n");
		return;
	}
	if (pmap->pm_hash == (pmap_hash_t)0) {
		printf("pm_hash == kernel\n");
		return;
	}
	hp = pmap->pm_hash;
	for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
	    for (j = 0; j < 2; j++) {
		if (!hp->pmh_pte[j].high)
			continue;
		printf("%d,%d: hi %x low %x\n", i, j,
			hp->pmh_pte[j].high, hp->pmh_pte[j].low);
	    }
	}
}
#endif