/*
 * Copyright (c) 1992 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)pmap.c	7.3 (Berkeley) 02/29/92
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and as to when physical maps must be made correct.
 */

#include "param.h"
#include "proc.h"
#include "malloc.h"
#include "user.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"

#include "../include/machConst.h"
#include "../include/pte.h"

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 * XXX really should do this as a part of the higher level code.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
	int		pv_flags;	/* flags */
} *pv_entry_t;

pv_entry_t	pv_table;	/* array of entries, one per page */
extern void	pmap_remove_pv();

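/*
 * Convert a managed physical address to its pv_table entry: pa_index()
 * counts pages starting from first_phys_addr, the base of managed memory.
 */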
#define pa_index(pa)		atop((pa) - first_phys_addr)
#define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])

#ifdef DEBUG
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
	int cachehit;	/* new entry forced valid entry out */
} enter_stats;
struct {
	int calls;
	int removes;
	int flushes;
	int pidflushes;	/* HW pid stolen */
	int pvfirst;
	int pvsearch;
} remove_stats;

int pmapdebug;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_CACHE	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_TLBPID	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

#endif /* DEBUG */

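/*
 * The initial mask value of 3 marks hardware PIDs 0 and 1 as in use:
 * PID 1 is preallocated for proc0 in mach_init() (see pmap_pinit())
 * and PID 0 is never handed out.
 */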
u_int	whichpids[2] = {	/* bit mask of hardware PID's in use */
	3, 0
};

struct pmap	kernel_pmap_store;
pmap_t		cur_pmap;	/* current pmap mapped in hardware */

vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
int		pmaxpagesperpage;	/* PAGE_SIZE / NBPG */
#ifdef ATTR
char		*pmap_attributes;	/* reference and modify bits */
#endif
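/*
 * Each user pmap carries a software TLB cache, pm_hash: an array of
 * (high, low) pairs shaped like MIPS TLB EntryHi/EntryLo, where "high"
 * holds a virtual address tagged with the hardware PID and "low" holds
 * the page frame and protection bits.  zero_pmap_hash is a single
 * read-only, all-zero table shared by newly created pmaps until
 * pmap_enter() allocates them a private one.
 */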
pmap_hash_t	zero_pmap_hash;		/* empty TLB hash table for init */

/*
 *	Bootstrap the system enough to run with virtual memory.
 */
void
pmap_bootstrap(firstaddr)
	vm_offset_t firstaddr;
{
	register int i;
	vm_offset_t start = firstaddr;
	vm_offset_t pa;
	extern int maxmem, physmem;

	/*
	 * Allocate a TLB hash table for the kernel.
	 * This could be a KSEG0 address and thus save TLB entries but
	 * it's faster and simpler in assembly language to have a
	 * fixed address that can be accessed with a 16 bit signed offset.
	 * Note: the kernel pm_hash field is null, user pm_hash fields are
	 * either the table or zero_pmap_hash.
	 */
	kernel_pmap_store.pm_hash = (pmap_hash_t)0;
	for (i = 0; i < PMAP_HASH_KPAGES; i++) {
		MachTLBWriteIndexed(i + UPAGES + PMAP_HASH_UPAGES,
			PMAP_HASH_KADDR + (i << PGSHIFT),
			firstaddr | PG_V | PG_M | PG_G);
		firstaddr += NBPG;
	}

	/*
	 * Allocate an empty TLB hash table for initial pmap's.
	 */
	zero_pmap_hash = (pmap_hash_t)MACH_PHYS_TO_CACHED(firstaddr);
	pa = firstaddr;
	firstaddr += PMAP_HASH_UPAGES * NBPG;

	/* init proc[0]'s pmap hash table */
	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
		kernel_pmap_store.pm_hash_ptes[i] = pa | PG_V | PG_RO;
		MachTLBWriteIndexed(i + UPAGES,
			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
				(1 << VMMACH_TLB_PID_SHIFT),
			kernel_pmap_store.pm_hash_ptes[i]);
		pa += NBPG;
	}

	/*
	 * Allocate memory for pv_table.
	 * This will allocate more entries than we really need.
	 * We should do this in pmap_init when we know the actual
	 * phys_start and phys_end but it's better to use phys addresses
	 * rather than kernel virtual addresses mapped through the TLB.
	 */
	i = (maxmem - pmax_btop(firstaddr)) * sizeof(struct pv_entry);
	i = pmax_round_page(i);
	pv_table = (pv_entry_t)MACH_PHYS_TO_CACHED(firstaddr);
	firstaddr += i;

	/*
	 * Clear allocated memory.
	 */
	bzero((caddr_t)MACH_PHYS_TO_CACHED(start), firstaddr - start);

	avail_start = firstaddr;
	avail_end = pmax_ptob(maxmem);
	mem_size = avail_end - avail_start;

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES * NPTEPG * NBPG;
	/* XXX need to decide how to set cnt.v_page_size */
	pmaxpagesperpage = 1;

	cur_pmap = &kernel_pmap_store;
	simple_lock_init(&kernel_pmap->pm_lock);
	kernel_pmap->pm_count = 1;
}

/*
 * Bootstrap memory allocator. This function allows for early dynamic
 * memory allocation until the virtual memory system has been bootstrapped.
 * After that point, either kmem_alloc or malloc should be used. This
 * function works by stealing pages from the (to be) managed page pool,
 * stealing virtual address space, then mapping the pages and zeroing them.
 *
 * It should be used from pmap_bootstrap until vm_page_startup; afterwards
 * it cannot be used, and will generate a panic if tried. Note that this
 * memory will never be freed, and in essence it is wired down.
 */
void *
pmap_bootstrap_alloc(size)
	int size;
{
	vm_offset_t val;
	extern boolean_t vm_page_startup_initialized;

	if (vm_page_startup_initialized)
		panic("pmap_bootstrap_alloc: called after startup initialized");

	val = MACH_PHYS_TO_CACHED(avail_start);
	size = round_page(size);
	avail_start += size;

	blkclr((caddr_t)val, size);
	return ((void *)val);
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	This routine should only be called by vm_page_startup()
 *	with KSEG0 addresses.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t virt;
	vm_offset_t start;
	vm_offset_t end;
	int prot;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
#endif

	return (round_page(end));
}

/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 */
pmap_t
pmap_create(size)
	vm_size_t size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * A software-use-only map doesn't need a pmap.
	 */
	if (size)
		return (NULL);

	printf("pmap_create(%x) XXX\n", size); /* XXX */
	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	register int i;
	extern struct vmspace vmspace0;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_pinit(%x)\n", pmap);
#endif
	simple_lock_init(&pmap->pm_lock);
	pmap->pm_count = 1;
	pmap->pm_flags = 0;
	pmap->pm_hash = zero_pmap_hash;
	for (i = 0; i < PMAP_HASH_UPAGES; i++)
		pmap->pm_hash_ptes[i] =
			(MACH_CACHED_TO_PHYS(zero_pmap_hash) +
			(i << PGSHIFT)) | PG_V | PG_RO;
	if (pmap == &vmspace0.vm_pmap)
		pmap->pm_tlbpid = 1;	/* preallocated in mach_init() */
	else
		pmap->pm_tlbpid = -1;	/* none allocated yet */
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_destroy(%x)\n", pmap);
#endif
	if (pmap == NULL)
		return;

	printf("pmap_destroy(%x) XXX\n", pmap); /* XXX */
	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register pmap_t pmap;
{
	register int id;
#ifdef DIAGNOSTIC
	register int i;
#endif

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_release(%x)\n", pmap);
#endif

	if (pmap->pm_hash && pmap->pm_hash != zero_pmap_hash) {
		kmem_free(kernel_map, (vm_offset_t)pmap->pm_hash,
			PMAP_HASH_SIZE);
		pmap->pm_hash = zero_pmap_hash;
	}
	if ((id = pmap->pm_tlbpid) < 0)
		return;
#ifdef DIAGNOSTIC
	if (!(whichpids[id >> 5] & (1 << (id & 0x1F))))
		panic("pmap_release: id free");
#endif
	MachTLBFlushPID(id);
	whichpids[id >> 5] &= ~(1 << (id & 0x1F));
	pmap->pm_flags &= ~PM_MODIFIED;
	pmap->pm_tlbpid = -1;
	if (pmap == cur_pmap)
		cur_pmap = (pmap_t)0;
#ifdef DIAGNOSTIC
	/* invalidate user PTE cache */
	for (i = 0; i < PMAP_HASH_UPAGES; i++)
		MachTLBWriteIndexed(i + UPAGES, MACH_RESERVED_ADDR, 0);
#endif
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)\n", pmap);
#endif
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	register pmap_t pmap;
	vm_offset_t sva, eva;
{
	register vm_offset_t va;
	pmap_hash_t hp;
	unsigned entry;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
	remove_stats.calls++;
#endif
	if (pmap == NULL)
		return;

	/* anything in the cache? */
	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
		return;

	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/* remove entries from kernel pmap */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS ||
		    eva > VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
			panic("pmap_remove");
#endif
		pte = kvtopte(sva);
		for (va = sva; va < eva; va += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, va, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
			pte->pt_entry = PG_NV;
			/*
			 * Flush the TLB for the given address.
			 */
			MachTLBFlushAddr(va);
#ifdef DEBUG
			remove_stats.flushes++;
#endif
		}
		return;
	}

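	/*
	 * Tag the range with the pmap's hardware PID: the cached "high"
	 * words store VA | PID, so both the software cache lookups and
	 * the hardware TLB flushes below need fully qualified addresses.
	 */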
	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	/*
	 * If we are not in the current address space, just flush the
	 * software cache and not the hardware.
	 */
	if (pmap != cur_pmap) {
		for (; va < eva; va += NBPG) {
			hp = &pmap->pm_hash[PMAP_HASH(va)];
			if (hp->high != va)
				continue;

			hp->high = 0;
			entry = hp->low;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
			pmap->pm_flags |= PM_MODIFIED;
#ifdef DEBUG
			remove_stats.removes++;
#endif
		}
		return;
	}

	for (; va < eva; va += NBPG) {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (hp->high != va)
			continue;

		hp->high = 0;
		entry = hp->low;
		if (entry & PG_WIRED)
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;
		pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
#ifdef ATTR
		pmap_attributes[atop(entry - KERNBASE)] = 0;
#endif
		/*
		 * Flush the TLB for the given address.
		 */
		MachTLBFlushAddr(va);
#ifdef DEBUG
		remove_stats.flushes++;
#endif
	}
}

/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(pa, prot)
	vm_offset_t pa;
	vm_prot_t prot;
{
	register pv_entry_t pv;
	register vm_offset_t va;
	int s;

#ifdef DEBUG
	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
	    prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
		printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
	if (!IS_VM_PHYSADDR(pa))
		return;

	switch (prot) {
	case VM_PROT_ALL:
		break;

	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pv = pa_to_pvh(pa);
		s = splimp();
		/*
		 * Loop over all current mappings setting/clearing as apropos.
		 */
		if (pv->pv_pmap != NULL) {
			for (; pv; pv = pv->pv_next) {
				extern vm_offset_t pager_sva, pager_eva;
				va = pv->pv_va;

				/*
				 * XXX don't write protect pager mappings
				 */
				if (va >= pager_sva && va < pager_eva)
					continue;
				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
					prot);
			}
		}
		splx(s);
		break;

	/* remove_all */
	default:
		pv = pa_to_pvh(pa);
		s = splimp();
		while (pv->pv_pmap != NULL) {
			pmap_remove(pv->pv_pmap, pv->pv_va,
				    pv->pv_va + PAGE_SIZE);
		}
		splx(s);
	}
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register vm_offset_t va;
	pmap_hash_t hp;
	u_int p;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

	/* anything in the software cache? */
	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
		return;

	if (!(prot & VM_PROT_READ)) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/*
		 * Change entries in kernel pmap.
		 * This will trap if the page is writeable (in order to set
		 * the dirty bit) even if the dirty bit is already set. The
		 * optimization isn't worth the effort since this code isn't
		 * executed much. The common case is to make a user page
		 * read-only.
		 */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS ||
		    eva > VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
			panic("pmap_protect");
#endif
		p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
		pte = kvtopte(sva);
		for (va = sva; va < eva; va += NBPG, pte++) {
			if (!(pte->pt_entry & PG_V))
				continue;
			pte->pt_entry = (pte->pt_entry & ~(PG_M | PG_RO)) | p;
			/*
			 * Update the TLB if the given address is in the cache.
			 */
			MachTLBUpdate(va, pte->pt_entry);
		}
		return;
	}

	p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	/*
	 * If we are not in the current address space, just flush the
	 * software cache and not the hardware.
	 */
	if (pmap != cur_pmap) {
		for (; va < eva; va += NBPG) {
			hp = &pmap->pm_hash[PMAP_HASH(va)];
			if (hp->high != va)
				continue;

			hp->low = (hp->low & ~(PG_M | PG_RO)) | p;
			pmap->pm_flags |= PM_MODIFIED;
		}
		return;
	}

	for (; va < eva; va += NBPG) {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (hp->high != va)
			continue;

		hp->low = (hp->low & ~(PG_M | PG_RO)) | p;
		/*
		 * Update the TLB if the given address is in the cache.
		 */
		MachTLBUpdate(hp->high, hp->low);
	}
}

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pmap_hash_t hp;
	register u_int npte;
	register int i;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
		       pmap, va, pa, prot, wired);
#endif
#ifdef DIAGNOSTIC
	if (!pmap)
		panic("pmap_enter: pmap");
	if (pmap->pm_tlbpid < 0)
		panic("pmap_enter: tlbpid");
	if (pmap == kernel_pmap) {
		enter_stats.kernel++;
		if ((va & 0xE0000000) != 0xC0000000)
			panic("pmap_enter: kva");
	} else {
		enter_stats.user++;
		if (va & 0x80000000)
			panic("pmap_enter: uva");
	}
	if (pa & 0x80000000)
		panic("pmap_enter: pa");
	if (!(prot & VM_PROT_READ))
		panic("pmap_enter: prot");
#endif

	/*
	 * See if we need to create a new TLB cache.
	 */
	if (pmap->pm_hash == zero_pmap_hash) {
		register vm_offset_t kva;
		register pt_entry_t *pte;

		kva = kmem_alloc(kernel_map, PMAP_HASH_SIZE);
		pmap->pm_hash = (pmap_hash_t)kva;

		/*
		 * Convert the kernel virtual address to a physical one
		 * and cache it in the pmap. Note: if the physical address
		 * can change (due to memory compaction in kmem_alloc?),
		 * we will have to update things.
		 */
		pte = kvtopte(kva);
		for (i = 0; i < PMAP_HASH_UPAGES; i++) {
			pmap->pm_hash_ptes[i] = pte->pt_entry & ~PG_G;
			pte++;
		}

		/*
		 * Map in new TLB cache if it is current.
		 */
		if (pmap == cur_pmap) {
#ifdef DIAGNOSTIC
			if (pmap->pm_tlbpid < 0)
				panic("pmap_enter: tlbpid");
#endif
			for (i = 0; i < PMAP_HASH_UPAGES; i++) {
				MachTLBWriteIndexed(i + UPAGES,
					(PMAP_HASH_UADDR + (i << PGSHIFT)) |
						(pmap->pm_tlbpid <<
						VMMACH_TLB_PID_SHIFT),
					pmap->pm_hash_ptes[i]);
			}
		}
#ifdef DIAGNOSTIC
		for (i = 0; i < PAGE_SIZE; i += sizeof(int), kva += sizeof(int))
			if (*(int *)kva != 0)
				panic("pmap_enter: *kva != 0");
#endif
	}

	if (IS_VM_PHYSADDR(pa)) {
		register pv_entry_t pv, npv;
		int s;

		if (!(prot & VM_PROT_WRITE))
			npte = PG_RO;
		else {
			register vm_page_t mem;

			mem = PHYS_TO_VM_PAGE(pa);
			if ((int)va < 0) {
				/*
				 * Don't bother to trap on kernel writes,
				 * just record page as dirty.
				 */
				npte = PG_M;
				mem->clean = FALSE;
			} else
#ifdef ATTR
				if ((pmap_attributes[atop(pa - KERNBASE)] &
				    PMAP_ATTR_MOD) || !mem->clean)
#else
				if (!mem->clean)
#endif
					npte = PG_M;
			else
				npte = 0;
		}

#ifdef DEBUG
		enter_stats.managed++;
#endif
		/*
		 * Enter the pmap and virtual address into the
		 * physical to virtual map table.
		 */
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("pmap_enter: pv %x: was %x/%x/%x\n",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		if (pv->pv_pmap == NULL) {
			/*
			 * No entries yet, use header as the first entry
			 */
#ifdef DEBUG
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
			pv->pv_flags = 0;
		} else {
			/*
			 * There is at least one other VA mapping this page.
			 * Place this entry after the header.
			 *
			 * Note: the entry may already be in the table if
			 * we are only changing the protection bits.
			 */
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va) {
#ifdef DIAGNOSTIC
				    if (!pmap->pm_hash) {
					unsigned entry;

					entry = kvtopte(va)->pt_entry;
					if (!(entry & PG_V) ||
					    (entry & PG_FRAME) != pa)
			printf("found kva %x pa %x in pv_table but != %x\n",
				va, pa, entry);
				    } else {
					hp = &pmap->pm_hash[PMAP_HASH(va)];
					if (hp->high != (va |
					    (pmap->pm_tlbpid <<
					    VMMACH_TLB_PID_SHIFT)) ||
					    (hp->low & PG_FRAME) != pa)
			printf("found va %x pa %x in pv_table but != %x %x\n",
				va, pa, hp->high, hp->low);
				    }
#endif
					goto fnd;
				}
			/* can this cause us to recurse forever? */
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			if (npv == NULL)
				panic("pmap_enter: pv_entry malloc failed");
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
		fnd:
			;
		}
		splx(s);
	} else {
		/*
		 * Assumption: if it is not part of our managed memory
		 * then it must be device memory which may be volatile.
		 */
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
		printf("pmap_enter: UNMANAGED ADDRESS va %x pa %x\n",
			va, pa); /* XXX */
		npte = (prot & VM_PROT_WRITE) ? PG_M : PG_RO;
	}

	/*
	 * The only time we need to flush the cache is if we
	 * execute from a physical address and then change the data.
	 * This is the best place to do this.
	 * pmap_protect() and pmap_remove() are mostly used to switch
	 * between R/W and R/O pages.
	 * NOTE: we only support cache flush for read only text.
	 */
#if 0
	if (prot == (VM_PROT_READ | VM_PROT_EXECUTE))
		MachFlushICache(MACH_PHYS_TO_UNCACHED(pa), PAGE_SIZE);
#endif

	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/* enter entries into kernel pmap */
		pte = kvtopte(va);
		npte |= pa | PG_V | PG_G;
		if (wired) {
			pmap->pm_stats.wired_count += pmaxpagesperpage;
			npte |= PG_WIRED;
		}
		i = pmaxpagesperpage;
		do {
			if (!(pte->pt_entry & PG_V)) {
				pmap->pm_stats.resident_count++;
				MachTLBWriteRandom(va, npte);
			} else {
				/*
				 * Update the same virtual address entry.
				 */
				MachTLBUpdate(va, npte);
			}
			pte->pt_entry = npte;
			va += NBPG;
			npte += NBPG;
			pte++;
		} while (--i != 0);
		return;
	}

	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * PMAX pages in a MACH page.
	 */
	npte |= pa | PG_V;
	if (wired) {
		pmap->pm_stats.wired_count += pmaxpagesperpage;
		npte |= PG_WIRED;
	}
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER)
		printf("pmap_enter: new pte value %x\n", npte);
#endif
	va |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
	i = pmaxpagesperpage;
	do {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (!hp->high) {
			pmap->pm_stats.resident_count++;
			hp->high = va;
			hp->low = npte;
			MachTLBWriteRandom(va, npte);
		} else {
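			/*
			 * The hash slot is already occupied: either this is
			 * the same VA being remapped, or a different VA
			 * hashed to this slot and the old entry must be
			 * evicted (unless it is wired).
			 */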
#ifdef DEBUG
			enter_stats.cachehit++;
#endif
			if (hp->high == va) {
				/*
				 * Update the same entry.
				 */
				hp->low = npte;
				MachTLBUpdate(va, npte);
			} else if (!(hp->low & PG_WIRED)) {
				MachTLBFlushAddr(hp->high);
				pmap_remove_pv(pmap, hp->high & PG_FRAME,
					hp->low & PG_FRAME);
				hp->high = va;
				hp->low = npte;
				MachTLBWriteRandom(va, npte);
			} else {
				/*
				 * Don't replace wired entries, just update
				 * the hardware TLB.
				 * Bug: routines to flush the TLB won't know
				 * that the entry is in the hardware.
				 */
				printf("pmap_enter: wired va %x %x\n", va,
					hp->low); /* XXX */
				panic("pmap_enter: wired"); /* XXX */
				MachTLBWriteRandom(va, npte);
			}
		}
		va += NBPG;
		npte += NBPG;
	} while (--i != 0);
}

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t	pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pmap_hash_t hp;
	u_int p;
	int i;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	p = wired ? PG_WIRED : 0;

	/*
	 * Don't need to flush the TLB since PG_WIRED is only in software.
	 */
	if (!pmap->pm_hash) {
		register pt_entry_t *pte;

		/* change entries in kernel pmap */
#ifdef DIAGNOSTIC
		if (va < VM_MIN_KERNEL_ADDRESS ||
		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
			panic("pmap_change_wiring");
#endif
		pte = kvtopte(va);
		i = pmaxpagesperpage;
		if (!(pte->pt_entry & PG_WIRED) && p)
			pmap->pm_stats.wired_count += i;
		else if ((pte->pt_entry & PG_WIRED) && !p)
			pmap->pm_stats.wired_count -= i;
		do {
			if (pte->pt_entry & PG_V)
				pte->pt_entry =
					(pte->pt_entry & ~PG_WIRED) | p;
			pte++;
		} while (--i != 0);
	} else if (pmap->pm_tlbpid >= 0 && pmap->pm_hash != zero_pmap_hash) {
		i = pmaxpagesperpage;
		do {
			hp = &pmap->pm_hash[PMAP_HASH(va)];
			if (hp->high) {
				if (!(hp->low & PG_WIRED) && p)
					pmap->pm_stats.wired_count++;
				else if ((hp->low & PG_WIRED) && !p)
					pmap->pm_stats.wired_count--;
				hp->low = (hp->low & ~PG_WIRED) | p;
			}
			va += NBPG;
		} while (--i != 0);
	}
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t	pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;
	register pmap_hash_t hp;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif

	if (!pmap->pm_hash) {
#ifdef DIAGNOSTIC
		if (va < VM_MIN_KERNEL_ADDRESS ||
		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
			panic("pmap_extract");
#endif
		pa = kvtopte(va)->pt_entry & PG_FRAME;
	} else if (pmap->pm_tlbpid >= 0) {
		hp = &pmap->pm_hash[PMAP_HASH(va)];
		if (hp->high)
			pa = hp->low & PG_FRAME;
		else
			pa = 0;
	} else
		pa = 0;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("%x\n", pa);
#endif
	return (pa);
}

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap;
	pmap_t src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 *	Require that all active physical maps contain no
 *	incorrect entries NOW.  [This update includes
 *	forcing updates of any address map caching.]
 *
 *	Generally used to ensure that a thread about
 *	to run will see a semantically correct world.
 */
void
pmap_update()
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()\n");
#endif
}

/*
 *	Routine:	pmap_collect
 *	Function:
 *		Garbage collects the physical map system for
 *		pages which are no longer used.
 *		Success need not be guaranteed -- that is, there
 *		may well be pages which are not referenced, but
 *		others may be collected.
 *	Usage:
 *		Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_collect(%x)\n", pmap);
#endif
}

/*
 *	pmap_zero_page zeros the specified (machine independent)
 *	page.
 */
void
pmap_zero_page(phys)
	vm_offset_t phys;
{
	register int *p, *end;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)\n", phys);
#endif
	p = (int *)MACH_PHYS_TO_CACHED(phys);
	end = p + PAGE_SIZE / sizeof(int);
	do {
		p[0] = 0;
		p[1] = 0;
		p[2] = 0;
		p[3] = 0;
		p += 4;
	} while (p != end);
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page.
 */
void
pmap_copy_page(src, dst)
	vm_offset_t src, dst;
{
	register int *s, *d, *end;
	register int tmp0, tmp1, tmp2, tmp3;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)\n", src, dst);
#endif
	s = (int *)MACH_PHYS_TO_CACHED(src);
	d = (int *)MACH_PHYS_TO_CACHED(dst);
	end = s + PAGE_SIZE / sizeof(int);
	do {
		tmp0 = s[0];
		tmp1 = s[1];
		tmp2 = s[2];
		tmp3 = s[3];
		d[0] = tmp0;
		d[1] = tmp1;
		d[2] = tmp2;
		d[3] = tmp3;
		s += 4;
		d += 4;
	} while (s != end);
}

/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t		pmap;
	vm_offset_t	sva, eva;
	boolean_t	pageable;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)\n",
		       pmap, sva, eva, pageable);
#endif
}

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_MOD;
#endif
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_REF;
#endif
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_REF);
#else
	return (FALSE);
#endif
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_MOD);
#else
	return (FALSE);
#endif
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_phys_address(%x)\n", ppn);
#endif
	panic("pmap_phys_address"); /* XXX */
	return (pmax_ptob(ppn));
}

/*
 * Miscellaneous support routines
 */

/*
 * Allocate a hardware PID and return it.
 * Also, change the hardwired TLB entry to point to the current TLB cache.
 * This is called by swtch().
 */
int
pmap_alloc_tlbpid(p)
	register struct proc *p;
{
	register pmap_t pmap;
	register u_int i;
	register int id;

	pmap = &p->p_vmspace->vm_pmap;
	if ((id = pmap->pm_tlbpid) >= 0) {
		if (pmap->pm_flags & PM_MODIFIED) {
			pmap->pm_flags &= ~PM_MODIFIED;
			MachTLBFlushPID(id);
		}
		goto done;
	}

	if ((i = whichpids[0]) != 0xFFFFFFFF)
		id = 0;
	else if ((i = whichpids[1]) != 0xFFFFFFFF)
		id = 32;
	else {
		register struct proc *q;
		register pmap_t q_pmap;

		/*
		 * Have to find a tlbpid to recycle.
		 * There is probably a better way to do this.
		 */
		for (q = allproc; q != NULL; q = q->p_nxt) {
			q_pmap = &q->p_vmspace->vm_pmap;
			if ((id = q_pmap->pm_tlbpid) < 0)
				continue;
			if (q->p_stat != SRUN)
				goto fnd;
		}
		if (q == NULL)
			panic("TLBPidAlloc");
	fnd:
		printf("pmap_alloc_tlbpid: recycle pid %d (%s) tlbpid %d\n",
			q->p_pid, q->p_comm, id); /* XXX */
		/*
		 * Even though the virtual to physical mapping hasn't changed,
		 * we need to clear the PID tag in the high entry of the cache.
		 */
		if (q_pmap->pm_hash != zero_pmap_hash) {
			register pmap_hash_t hp;

			hp = q_pmap->pm_hash;
			for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
				if (!hp->high)
					continue;

				if (hp->low & PG_WIRED) {
					printf("Clearing wired user entry! h %x l %x\n", hp->high, hp->low);
					panic("pmap_alloc_tlbpid: wired");
				}
				pmap_remove_pv(q_pmap, hp->high & PG_FRAME,
					hp->low & PG_FRAME);
				hp->high = 0;
				q_pmap->pm_stats.resident_count--;
			}
		}
		q_pmap->pm_tlbpid = -1;
		MachTLBFlushPID(id);
#ifdef DEBUG
		remove_stats.pidflushes++;
#endif
		pmap->pm_tlbpid = id;
		goto done;
	}
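	/*
	 * Scan the chosen mask word for its first clear bit; the word
	 * was selected above because it is not all ones, so the scan
	 * terminates.
	 */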
	while (i & 1) {
		i >>= 1;
		id++;
	}
	whichpids[id >> 5] |= 1 << (id & 0x1F);
	pmap->pm_tlbpid = id;
done:
	/*
	 * Map in new TLB cache.
	 */
	if (pmap == cur_pmap)
		return (id);
	cur_pmap = pmap;
	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
		MachTLBWriteIndexed(i + UPAGES,
			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
				(id << VMMACH_TLB_PID_SHIFT),
			pmap->pm_hash_ptes[i]);
	}
	return (id);
}

/*
 * Remove a physical to virtual address translation.
 */
void
pmap_remove_pv(pmap, va, pa)
	pmap_t pmap;
	vm_offset_t va, pa;
{
	register pv_entry_t pv, npv;
	int s;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
#endif
	/*
	 * Remove page from the PV table (raise IPL since we
	 * may be called at interrupt time).
	 */
	if (!IS_VM_PHYSADDR(pa))
		return;
	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			free((caddr_t)npv, M_VMPVENT);
		} else
			pv->pv_pmap = NULL;
#ifdef DEBUG
		remove_stats.pvfirst++;
#endif
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
#ifdef DEBUG
			remove_stats.pvsearch++;
#endif
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				goto fnd;
		}
		printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa);
		panic("pmap_remove_pv");
	fnd:
		pv->pv_next = npv->pv_next;
		free((caddr_t)npv, M_VMPVENT);
	}
	splx(s);
}

#ifdef DEBUG
pmap_print(pmap)
	pmap_t pmap;
{
	register pmap_hash_t hp;
	register int i;

	printf("\tpmap_print(%x)\n", pmap);

	if (pmap->pm_hash == zero_pmap_hash) {
		printf("pm_hash == zero\n");
		return;
	}
	if (pmap->pm_hash == (pmap_hash_t)0) {
		printf("pm_hash == kernel\n");
		return;
	}
	hp = pmap->pm_hash;
	for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
		if (!hp->high)
			continue;
		printf("%d: hi %x low %x\n", i, hp->high, hp->low);
	}
}
#endif