xref: /csrg-svn/sys/pmax/pmax/pmap.c (revision 53611)
152118Smckusick /*
252118Smckusick  * Copyright (c) 1992 The Regents of the University of California.
352118Smckusick  * All rights reserved.
452118Smckusick  *
552118Smckusick  * This code is derived from software contributed to Berkeley by
652118Smckusick  * the Systems Programming Group of the University of Utah Computer
752118Smckusick  * Science Department and Ralph Campbell.
852118Smckusick  *
952118Smckusick  * %sccs.include.redist.c%
1052118Smckusick  *
11*53611Sralph  *	@(#)pmap.c	7.8 (Berkeley) 05/16/92
1252118Smckusick  */
1352118Smckusick 
1452118Smckusick /*
1552118Smckusick  *	Manages physical address maps.
1652118Smckusick  *
1752118Smckusick  *	In addition to hardware address maps, this
1852118Smckusick  *	module is called upon to provide software-use-only
1952118Smckusick  *	maps which may or may not be stored in the same
2052118Smckusick  *	form as hardware maps.  These pseudo-maps are
2152118Smckusick  *	used to store intermediate results from copy
2252118Smckusick  *	operations to and from address spaces.
2352118Smckusick  *
2452118Smckusick  *	Since the information managed by this module is
2552118Smckusick  *	also stored by the logical address mapping module,
2652118Smckusick  *	this module may throw away valid virtual-to-physical
2752118Smckusick  *	mappings at almost any time.  However, invalidations
2852118Smckusick  *	of virtual-to-physical mappings must be done as
2952118Smckusick  *	requested.
3052118Smckusick  *
3152118Smckusick  *	In order to cope with hardware architectures which
3252118Smckusick  *	make virtual-to-physical map invalidates expensive,
3352118Smckusick  *	this module may delay invalidation or protection-reduction
3452118Smckusick  *	operations until such time as they are actually
3552118Smckusick  *	necessary.  This module is given full information as
3652118Smckusick  *	to which processors are currently using which maps,
3752118Smckusick  *	and as to when physical maps must be made correct.
3852118Smckusick  */
3952118Smckusick 
4052118Smckusick #include "param.h"
4152118Smckusick #include "proc.h"
4252118Smckusick #include "malloc.h"
4352118Smckusick #include "user.h"
4452118Smckusick 
4552118Smckusick #include "vm/vm.h"
4652118Smckusick #include "vm/vm_kern.h"
4752118Smckusick #include "vm/vm_page.h"
4852118Smckusick 
4952118Smckusick #include "../include/machConst.h"
5052741Sralph #include "../include/pte.h"
5152118Smckusick 
5252118Smckusick /*
5352118Smckusick  * For each vm_page_t, there is a list of all currently valid virtual
5452118Smckusick  * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
5552118Smckusick  * XXX really should do this as a part of the higher level code.
5652118Smckusick  */
5752118Smckusick typedef struct pv_entry {
5852118Smckusick 	struct pv_entry	*pv_next;	/* next pv_entry */
5952118Smckusick 	struct pmap	*pv_pmap;	/* pmap where mapping lies */
6052118Smckusick 	vm_offset_t	pv_va;		/* virtual address for mapping */
6152118Smckusick } *pv_entry_t;
6252118Smckusick 
6352118Smckusick pv_entry_t	pv_table;	/* array of entries, one per page */
6452118Smckusick extern void	pmap_remove_pv();
6552118Smckusick 
6652118Smckusick #define pa_index(pa)		atop((pa) - first_phys_addr)
6752118Smckusick #define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
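
/*
 * Illustrative sketch (editor's addition, not in the original source):
 * given the physical address of a managed page, pa_to_pvh() yields the
 * head of that page's pv chain; the chain is then walked via pv_next,
 * with a NULL pv_pmap in the header meaning "no mappings".
 *
 *	register pv_entry_t pv;
 *
 *	pv = pa_to_pvh(pa);
 *	if (pv->pv_pmap != NULL)
 *		for (; pv; pv = pv->pv_next)
 *			printf("pa %x mapped at va %x by pmap %x\n",
 *				pa, pv->pv_va, pv->pv_pmap);
 */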
6852118Smckusick 
6952118Smckusick #ifdef DEBUG
7052118Smckusick struct {
7152118Smckusick 	int kernel;	/* entering kernel mapping */
7252118Smckusick 	int user;	/* entering user mapping */
7352118Smckusick 	int ptpneeded;	/* needed to allocate a PT page */
7452118Smckusick 	int pwchange;	/* no mapping change, just wiring or protection */
7552118Smckusick 	int wchange;	/* no mapping change, just wiring */
7652118Smckusick 	int mchange;	/* was mapped but mapping to different page */
7752118Smckusick 	int managed;	/* a managed page */
7852118Smckusick 	int firstpv;	/* first mapping for this PA */
7952118Smckusick 	int secondpv;	/* second mapping for this PA */
8052118Smckusick 	int ci;		/* cache inhibited */
8152118Smckusick 	int unmanaged;	/* not a managed page */
8252118Smckusick 	int flushes;	/* cache flushes */
8352118Smckusick 	int cachehit;	/* new entry forced valid entry out */
8452118Smckusick } enter_stats;
8552118Smckusick struct {
8652118Smckusick 	int calls;
8752118Smckusick 	int removes;
8852118Smckusick 	int flushes;
8952118Smckusick 	int pidflushes;	/* HW pid stolen */
9052118Smckusick 	int pvfirst;
9152118Smckusick 	int pvsearch;
9252118Smckusick } remove_stats;
9352118Smckusick 
9452118Smckusick int pmapdebug;
9552118Smckusick #define PDB_FOLLOW	0x0001
9652118Smckusick #define PDB_INIT	0x0002
9752118Smckusick #define PDB_ENTER	0x0004
9852118Smckusick #define PDB_REMOVE	0x0008
9952118Smckusick #define PDB_CREATE	0x0010
10052118Smckusick #define PDB_PTPAGE	0x0020
10152118Smckusick #define PDB_CACHE	0x0040
10252118Smckusick #define PDB_BITS	0x0080
10352118Smckusick #define PDB_COLLECT	0x0100
10452118Smckusick #define PDB_PROTECT	0x0200
10552118Smckusick #define PDB_TLBPID	0x0400
10652118Smckusick #define PDB_PARANOIA	0x2000
10752118Smckusick #define PDB_WIRING	0x4000
10852118Smckusick #define PDB_PVDUMP	0x8000
10952118Smckusick 
11052118Smckusick #endif /* DEBUG */
11152118Smckusick 
11252118Smckusick u_int	whichpids[2] = {	/* bit mask of hardware PID's in use */
11352118Smckusick 	3, 0
11452118Smckusick };
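
/*
 * Illustrative sketch (editor's addition): a hardware PID `id' is tested,
 * allocated, and freed in whichpids[] with the usual word/bit split used
 * by pmap_release() and pmap_alloc_tlbpid() below:
 *
 *	whichpids[id >> 5] &   (1 << (id & 0x1F))	-- in use?
 *	whichpids[id >> 5] |=  (1 << (id & 0x1F))	-- allocate
 *	whichpids[id >> 5] &= ~(1 << (id & 0x1F))	-- free
 *
 * The initializer 3 marks PIDs 0 and 1 as already taken; PID 1 is
 * preallocated for proc0 in mach_init() (see pmap_pinit()).
 */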
11552118Smckusick 
11652118Smckusick struct pmap	kernel_pmap_store;
11752118Smckusick pmap_t		cur_pmap;	/* current pmap mapped in hardware */
11852118Smckusick 
11952118Smckusick vm_offset_t    	avail_start;	/* PA of first available physical page */
12052118Smckusick vm_offset_t	avail_end;	/* PA of last available physical page */
12152118Smckusick vm_size_t	mem_size;	/* memory size in bytes */
12252118Smckusick vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
12352118Smckusick vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
12452118Smckusick int		pmaxpagesperpage;	/* PAGE_SIZE / NBPG */
12552118Smckusick #ifdef ATTR
12652118Smckusick char		*pmap_attributes;	/* reference and modify bits */
12752118Smckusick #endif
12852118Smckusick pmap_hash_t	zero_pmap_hash;		/* empty TLB hash table for init */
12952118Smckusick 
13052118Smckusick /*
13152118Smckusick  *	Bootstrap the system enough to run with virtual memory.
13252118Smckusick  */
13352118Smckusick void
13452118Smckusick pmap_bootstrap(firstaddr)
13552118Smckusick 	vm_offset_t firstaddr;
13652118Smckusick {
13752118Smckusick 	register int i;
13852118Smckusick 	vm_offset_t start = firstaddr;
13952118Smckusick 	extern int maxmem, physmem;
14052118Smckusick 
14152118Smckusick 	/*
14252118Smckusick 	 * Allocate a TLB hash table for the kernel.
14352118Smckusick 	 * This could be a KSEG0 address and thus save TLB entries, but
14452118Smckusick 	 * it's faster and simpler in assembly language to have a
14552118Smckusick 	 * fixed address that can be accessed with a 16-bit signed offset.
14652118Smckusick 	 * Note: the kernel pm_hash field is null; user pm_hash fields are
14752118Smckusick 	 * either the table or zero_pmap_hash.
14852118Smckusick 	 */
14952118Smckusick 	kernel_pmap_store.pm_hash = (pmap_hash_t)0;
15052118Smckusick 	for (i = 0; i < PMAP_HASH_KPAGES; i++) {
15152118Smckusick 		MachTLBWriteIndexed(i + UPAGES + PMAP_HASH_UPAGES,
15252118Smckusick 			PMAP_HASH_KADDR + (i << PGSHIFT),
15352118Smckusick 			firstaddr | PG_V | PG_M | PG_G);
15452118Smckusick 		firstaddr += NBPG;
15552118Smckusick 	}
15652118Smckusick 
15752118Smckusick 	/*
15852118Smckusick 	 * Allocate an empty TLB hash table for initial pmap's.
15952118Smckusick 	 */
16052741Sralph 	zero_pmap_hash = (pmap_hash_t)MACH_PHYS_TO_CACHED(firstaddr);
16152118Smckusick 
16252118Smckusick 	/* init proc[0]'s pmap hash table */
16352118Smckusick 	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
16452937Sralph 		kernel_pmap_store.pm_hash_ptes[i] = firstaddr | PG_V | PG_RO;
16552118Smckusick 		MachTLBWriteIndexed(i + UPAGES,
16652118Smckusick 			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
16752118Smckusick 				(1 << VMMACH_TLB_PID_SHIFT),
16852118Smckusick 			kernel_pmap_store.pm_hash_ptes[i]);
16952937Sralph 		firstaddr += NBPG;
17052118Smckusick 	}
17152118Smckusick 
17252118Smckusick 	/*
17352118Smckusick 	 * Allocate memory for pv_table.
17452118Smckusick 	 * This will allocate more entries than we really need.
17552118Smckusick 	 * We should do this in pmap_init when we know the actual
17652118Smckusick 	 * phys_start and phys_end, but it's better to use phys addresses
17752118Smckusick 	 * rather than kernel virtual addresses mapped through the TLB.
17852118Smckusick 	 */
17952118Smckusick 	i = (maxmem - pmax_btop(firstaddr)) * sizeof(struct pv_entry);
18052118Smckusick 	i = pmax_round_page(i);
18152741Sralph 	pv_table = (pv_entry_t)MACH_PHYS_TO_CACHED(firstaddr);
18252118Smckusick 	firstaddr += i;
18352118Smckusick 
18452118Smckusick 	/*
18552118Smckusick 	 * Clear allocated memory.
18652118Smckusick 	 */
18752741Sralph 	bzero((caddr_t)MACH_PHYS_TO_CACHED(start), firstaddr - start);
18852118Smckusick 
18952118Smckusick 	avail_start = firstaddr;
19052118Smckusick 	avail_end = pmax_ptob(maxmem);
19152118Smckusick 	mem_size = avail_end - avail_start;
19252118Smckusick 
19352118Smckusick 	virtual_avail = VM_MIN_KERNEL_ADDRESS;
19452118Smckusick 	virtual_end = VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES * NPTEPG * NBPG;
19552118Smckusick 	/* XXX need to decide how to set cnt.v_page_size */
19652118Smckusick 	pmaxpagesperpage = 1;
19752118Smckusick 
19852606Smckusick 	cur_pmap = &kernel_pmap_store;
19952937Sralph 	simple_lock_init(&kernel_pmap_store.pm_lock);
20052937Sralph 	kernel_pmap_store.pm_count = 1;
20152118Smckusick }
20252118Smckusick 
20352118Smckusick /*
20452118Smckusick  * Bootstrap memory allocator. This function allows for early dynamic
20552118Smckusick  * memory allocation until the virtual memory system has been bootstrapped.
20652118Smckusick  * After that point, either kmem_alloc or malloc should be used. This
20752118Smckusick  * function works by stealing pages from the (to be) managed page pool,
20852118Smckusick  * stealing virtual address space, then mapping the pages and zeroing them.
20952118Smckusick  *
21052118Smckusick  * It should be used from pmap_bootstrap until vm_page_startup; afterwards
21152118Smckusick  * it cannot be used, and will generate a panic if tried. Note that this
21252118Smckusick  * memory will never be freed, and in essence it is wired down.
21352118Smckusick  */
21452118Smckusick void *
21552118Smckusick pmap_bootstrap_alloc(size)
21652118Smckusick 	int size;
21752118Smckusick {
21852118Smckusick 	vm_offset_t val;
21952118Smckusick 	extern boolean_t vm_page_startup_initialized;
22052118Smckusick 
22152118Smckusick 	if (vm_page_startup_initialized)
22252118Smckusick 		panic("pmap_bootstrap_alloc: called after startup initialized");
22352118Smckusick 
22452741Sralph 	val = MACH_PHYS_TO_CACHED(avail_start);
22552118Smckusick 	size = round_page(size);
22652118Smckusick 	avail_start += size;
22752118Smckusick 
22852741Sralph 	blkclr((caddr_t)val, size);
22952741Sralph 	return ((void *)val);
23052118Smckusick }
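
/*
 * Illustrative sketch (editor's addition): a typical early-boot caller,
 * with a hypothetical table and count. The pages come back zeroed and
 * permanently wired, addressed through cached KSEG0, so the pointer is
 * usable before the VM system is up; there is no corresponding free.
 *
 *	struct foo *tbl;	hypothetical
 *
 *	tbl = (struct foo *)pmap_bootstrap_alloc(nfoo * sizeof(struct foo));
 */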
23152118Smckusick 
23252118Smckusick /*
23352118Smckusick  *	Initialize the pmap module.
23452118Smckusick  *	Called by vm_init, to initialize any structures that the pmap
23552118Smckusick  *	system needs to map virtual memory.
23652118Smckusick  */
23752118Smckusick void
23852118Smckusick pmap_init(phys_start, phys_end)
23952118Smckusick 	vm_offset_t phys_start, phys_end;
24052118Smckusick {
24152118Smckusick 
24252118Smckusick #ifdef DEBUG
24352118Smckusick 	if (pmapdebug & PDB_FOLLOW)
24452118Smckusick 		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
24552118Smckusick #endif
24652118Smckusick }
24752118Smckusick 
24852118Smckusick /*
24952118Smckusick  *	Used to map a range of physical addresses into kernel
25052118Smckusick  *	virtual address space.
25152118Smckusick  *
25252118Smckusick  *	This routine should only be called by vm_page_startup()
25352118Smckusick  *	with KSEG0 addresses.
25452118Smckusick  */
25552118Smckusick vm_offset_t
25652118Smckusick pmap_map(virt, start, end, prot)
25752118Smckusick 	vm_offset_t virt;
25852118Smckusick 	vm_offset_t start;
25952118Smckusick 	vm_offset_t end;
26052118Smckusick 	int prot;
26152118Smckusick {
26252118Smckusick 
26352118Smckusick #ifdef DEBUG
26452118Smckusick 	if (pmapdebug & PDB_FOLLOW)
26552118Smckusick 		printf("pmap_map(%x, %x, %x, %x)\n", virt, start, end, prot);
26652118Smckusick #endif
26752118Smckusick 
26852741Sralph 	return (round_page(end));
26952118Smckusick }
27052118Smckusick 
27152118Smckusick /*
27252118Smckusick  *	Create and return a physical map.
27352118Smckusick  *
27452118Smckusick  *	If the size specified for the map
27552118Smckusick  *	is zero, the map is an actual physical
27652118Smckusick  *	map, and may be referenced by the
27752118Smckusick  *	hardware.
27852118Smckusick  *
27952118Smckusick  *	If the size specified is non-zero,
28052118Smckusick  *	the map will be used in software only, and
28152118Smckusick  *	is bounded by that size.
28252118Smckusick  */
28352118Smckusick pmap_t
28452118Smckusick pmap_create(size)
28552118Smckusick 	vm_size_t size;
28652118Smckusick {
28752118Smckusick 	register pmap_t pmap;
28852118Smckusick 
28952118Smckusick #ifdef DEBUG
29052118Smckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
29152118Smckusick 		printf("pmap_create(%x)\n", size);
29252118Smckusick #endif
29352118Smckusick 	/*
29452118Smckusick 	 * A software-use-only map does not need a pmap.
29552118Smckusick 	 */
29652118Smckusick 	if (size)
29752741Sralph 		return (NULL);
29852118Smckusick 
29952118Smckusick 	printf("pmap_create(%x) XXX\n", size); /* XXX */
30052118Smckusick 	/* XXX: is it ok to wait here? */
30152118Smckusick 	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
30252118Smckusick #ifdef notifwewait
30352118Smckusick 	if (pmap == NULL)
30452118Smckusick 		panic("pmap_create: cannot allocate a pmap");
30552118Smckusick #endif
30652118Smckusick 	bzero(pmap, sizeof(*pmap));
30752118Smckusick 	pmap_pinit(pmap);
30852118Smckusick 	return (pmap);
30952118Smckusick }
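
/*
 * Illustrative sketch (editor's addition): the two calling conventions
 * described in the comment above.
 *
 *	pmap_t pm;
 *
 *	pm = pmap_create(0);		real map, usable by the hardware
 *	pm = pmap_create(size);		software-only map; this
 *					implementation returns NULL, so
 *					callers must tolerate that
 */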
31052118Smckusick 
31152118Smckusick /*
31252118Smckusick  * Initialize a preallocated and zeroed pmap structure,
31352118Smckusick  * such as one in a vmspace structure.
31452118Smckusick  */
31552118Smckusick void
31652118Smckusick pmap_pinit(pmap)
31752118Smckusick 	register struct pmap *pmap;
31852118Smckusick {
31952118Smckusick 	register int i;
32052118Smckusick 	extern struct vmspace vmspace0;
32152118Smckusick 
32252118Smckusick #ifdef DEBUG
32352118Smckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
32452118Smckusick 		printf("pmap_pinit(%x)\n", pmap);
32552118Smckusick #endif
32652118Smckusick 	simple_lock_init(&pmap->pm_lock);
32752118Smckusick 	pmap->pm_count = 1;
32852118Smckusick 	pmap->pm_flags = 0;
32952118Smckusick 	pmap->pm_hash = zero_pmap_hash;
33052118Smckusick 	for (i = 0; i < PMAP_HASH_UPAGES; i++)
33152118Smckusick 		pmap->pm_hash_ptes[i] =
33252937Sralph 			(MACH_CACHED_TO_PHYS(zero_pmap_hash) + (i << PGSHIFT)) |
33352937Sralph 				PG_V | PG_RO;
33452118Smckusick 	if (pmap == &vmspace0.vm_pmap)
33552118Smckusick 		pmap->pm_tlbpid = 1;	/* preallocated in mach_init() */
33652118Smckusick 	else
33752118Smckusick 		pmap->pm_tlbpid = -1;	/* none allocated yet */
33852118Smckusick }
33952118Smckusick 
34052118Smckusick /*
34152118Smckusick  *	Retire the given physical map from service.
34252118Smckusick  *	Should only be called if the map contains
34352118Smckusick  *	no valid mappings.
34452118Smckusick  */
34552118Smckusick void
34652118Smckusick pmap_destroy(pmap)
34752118Smckusick 	register pmap_t pmap;
34852118Smckusick {
34952118Smckusick 	int count;
35052118Smckusick 
35152118Smckusick #ifdef DEBUG
35252118Smckusick 	if (pmapdebug & PDB_FOLLOW)
35352118Smckusick 		printf("pmap_destroy(%x)\n", pmap);
35452118Smckusick #endif
35552118Smckusick 	if (pmap == NULL)
35652118Smckusick 		return;
35752118Smckusick 
35852118Smckusick 	printf("pmap_destroy(%x) XXX\n", pmap); /* XXX */
35952118Smckusick 	simple_lock(&pmap->pm_lock);
36052118Smckusick 	count = --pmap->pm_count;
36152118Smckusick 	simple_unlock(&pmap->pm_lock);
36252118Smckusick 	if (count == 0) {
36352118Smckusick 		pmap_release(pmap);
36452118Smckusick 		free((caddr_t)pmap, M_VMPMAP);
36552118Smckusick 	}
36652118Smckusick }
36752118Smckusick 
36852118Smckusick /*
36952118Smckusick  * Release any resources held by the given physical map.
37052118Smckusick  * Called when a pmap initialized by pmap_pinit is being released.
37152118Smckusick  * Should only be called if the map contains no valid mappings.
37252118Smckusick  */
37352118Smckusick void
37452118Smckusick pmap_release(pmap)
37552118Smckusick 	register pmap_t pmap;
37652118Smckusick {
37752118Smckusick 	register int id;
37852118Smckusick #ifdef DIAGNOSTIC
37952118Smckusick 	register int i;
38052118Smckusick #endif
38152118Smckusick 
38252118Smckusick #ifdef DEBUG
38352118Smckusick 	if (pmapdebug & PDB_FOLLOW)
38452118Smckusick 		printf("pmap_release(%x)\n", pmap);
38552118Smckusick #endif
38652118Smckusick 
38752118Smckusick 	if (pmap->pm_hash && pmap->pm_hash != zero_pmap_hash) {
38852118Smckusick 		kmem_free(kernel_map, (vm_offset_t)pmap->pm_hash,
38952118Smckusick 			PMAP_HASH_SIZE);
39052118Smckusick 		pmap->pm_hash = zero_pmap_hash;
39152118Smckusick 	}
39252118Smckusick 	if ((id = pmap->pm_tlbpid) < 0)
39352118Smckusick 		return;
39452118Smckusick #ifdef DIAGNOSTIC
39552118Smckusick 	if (!(whichpids[id >> 5] & (1 << (id & 0x1F))))
39652118Smckusick 		panic("pmap_release: id free");
39752118Smckusick #endif
39852118Smckusick 	MachTLBFlushPID(id);
39952118Smckusick 	whichpids[id >> 5] &= ~(1 << (id & 0x1F));
40052118Smckusick 	pmap->pm_flags &= ~PM_MODIFIED;
40152118Smckusick 	pmap->pm_tlbpid = -1;
40252118Smckusick 	if (pmap == cur_pmap)
40352118Smckusick 		cur_pmap = (pmap_t)0;
40452118Smckusick #ifdef DIAGNOSTIC
40552118Smckusick 	/* invalidate user PTE cache */
40652118Smckusick 	for (i = 0; i < PMAP_HASH_UPAGES; i++)
40752118Smckusick 		MachTLBWriteIndexed(i + UPAGES, MACH_RESERVED_ADDR, 0);
40852118Smckusick #endif
40952118Smckusick }
41052118Smckusick 
41152118Smckusick /*
41252118Smckusick  *	Add a reference to the specified pmap.
41352118Smckusick  */
41452118Smckusick void
41552118Smckusick pmap_reference(pmap)
41652118Smckusick 	pmap_t pmap;
41752118Smckusick {
41852118Smckusick 
41952118Smckusick #ifdef DEBUG
42052118Smckusick 	if (pmapdebug & PDB_FOLLOW)
42152118Smckusick 		printf("pmap_reference(%x)\n", pmap);
42252118Smckusick #endif
42352118Smckusick 	if (pmap != NULL) {
42452118Smckusick 		simple_lock(&pmap->pm_lock);
42552118Smckusick 		pmap->pm_count++;
42652118Smckusick 		simple_unlock(&pmap->pm_lock);
42752118Smckusick 	}
42852118Smckusick }
42952118Smckusick 
43052118Smckusick /*
43152118Smckusick  *	Remove the given range of addresses from the specified map.
43252118Smckusick  *
43352118Smckusick  *	It is assumed that the start and end are properly
43452118Smckusick  *	rounded to the page size.
43552118Smckusick  */
43652118Smckusick void
43752118Smckusick pmap_remove(pmap, sva, eva)
43852118Smckusick 	register pmap_t pmap;
43952118Smckusick 	vm_offset_t sva, eva;
44052118Smckusick {
44152118Smckusick 	register vm_offset_t va;
44252118Smckusick 	register pv_entry_t pv, npv;
44352118Smckusick 	pmap_hash_t hp;
44452118Smckusick 	unsigned entry;
44552118Smckusick 
44652118Smckusick #ifdef DEBUG
44752118Smckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
44852118Smckusick 		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
44952118Smckusick 	remove_stats.calls++;
45052118Smckusick #endif
45152118Smckusick 	if (pmap == NULL)
45252118Smckusick 		return;
45352118Smckusick 
45452118Smckusick 	/* anything in the cache? */
45552118Smckusick 	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
45652118Smckusick 		return;
45752118Smckusick 
45852118Smckusick 	if (!pmap->pm_hash) {
45952118Smckusick 		register pt_entry_t *pte;
46052118Smckusick 
46152118Smckusick 		/* remove entries from kernel pmap */
46252741Sralph #ifdef DIAGNOSTIC
46352741Sralph 		if (sva < VM_MIN_KERNEL_ADDRESS ||
46452741Sralph 		    eva > VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
46552741Sralph 			panic("pmap_remove");
46652741Sralph #endif
46752118Smckusick 		pte = kvtopte(sva);
46852118Smckusick 		for (va = sva; va < eva; va += NBPG, pte++) {
46952118Smckusick 			entry = pte->pt_entry;
47052118Smckusick 			if (!(entry & PG_V))
47152118Smckusick 				continue;
47252118Smckusick 			if (entry & PG_WIRED)
47352118Smckusick 				pmap->pm_stats.wired_count--;
47452118Smckusick 			pmap->pm_stats.resident_count--;
47552118Smckusick 			pmap_remove_pv(pmap, va, entry & PG_FRAME);
47652118Smckusick #ifdef ATTR
47752118Smckusick 			pmap_attributes[atop(entry - KERNBASE)] = 0;
47852118Smckusick #endif
47952118Smckusick 			pte->pt_entry = PG_NV;
48052118Smckusick 			/*
48152118Smckusick 			 * Flush the TLB for the given address.
48252118Smckusick 			 */
48352118Smckusick 			MachTLBFlushAddr(va);
48452118Smckusick #ifdef DEBUG
48552118Smckusick 			remove_stats.flushes++;
48652118Smckusick #endif
48752118Smckusick 		}
48852118Smckusick 		return;
48952118Smckusick 	}
49052118Smckusick 
49152118Smckusick 	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
49252118Smckusick 	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
49352118Smckusick 	/*
49452118Smckusick 	 * If we are not in the current address space, just flush the
49552118Smckusick 	 * software cache and not the hardware.
49652118Smckusick 	 */
49752118Smckusick 	if (pmap != cur_pmap) {
49852118Smckusick 		for (; va < eva; va += NBPG) {
49952118Smckusick 			hp = &pmap->pm_hash[PMAP_HASH(va)];
50052118Smckusick 			if (hp->high != va)
50152118Smckusick 				continue;
50252118Smckusick 
50352118Smckusick 			hp->high = 0;
50452118Smckusick 			entry = hp->low;
50552118Smckusick 			if (entry & PG_WIRED)
50652118Smckusick 				pmap->pm_stats.wired_count--;
50752118Smckusick 			pmap->pm_stats.resident_count--;
50852118Smckusick 			pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
50952118Smckusick #ifdef ATTR
51052118Smckusick 			pmap_attributes[atop(entry - KERNBASE)] = 0;
51152118Smckusick #endif
51252118Smckusick 			pmap->pm_flags |= PM_MODIFIED;
51352118Smckusick #ifdef DEBUG
51452118Smckusick 			remove_stats.removes++;
51552118Smckusick #endif
51652118Smckusick 		}
51752118Smckusick 		return;
51852118Smckusick 	}
51952118Smckusick 
52052118Smckusick 	for (; va < eva; va += NBPG) {
52152118Smckusick 		hp = &pmap->pm_hash[PMAP_HASH(va)];
52252118Smckusick 		if (hp->high != va)
52352118Smckusick 			continue;
52452118Smckusick 
52552118Smckusick 		hp->high = 0;
52652118Smckusick 		entry = hp->low;
52752118Smckusick 		if (entry & PG_WIRED)
52852118Smckusick 			pmap->pm_stats.wired_count--;
52952118Smckusick 		pmap->pm_stats.resident_count--;
53052118Smckusick 		pmap_remove_pv(pmap, va & PG_FRAME, entry & PG_FRAME);
53152118Smckusick #ifdef ATTR
53252118Smckusick 		pmap_attributes[atop(entry - KERNBASE)] = 0;
53352118Smckusick #endif
53452118Smckusick 		/*
53552118Smckusick 		 * Flush the TLB for the given address.
53652118Smckusick 		 */
53752118Smckusick 		MachTLBFlushAddr(va);
53852118Smckusick #ifdef DEBUG
53952118Smckusick 		remove_stats.flushes++;
54052118Smckusick #endif
54152118Smckusick 	}
54252118Smckusick }
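
/*
 * Illustrative sketch (editor's addition): user-space lookups above tag
 * the virtual address with the pmap's hardware PID before probing the
 * hash, so two address spaces cannot collide on the same VA:
 *
 *	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
 *	hp = &pmap->pm_hash[PMAP_HASH(va)];
 *	if (hp->high == va)
 *		... entry really belongs to this pmap ...
 *
 * The same tagged value is what MachTLBFlushAddr() must be given to
 * match the corresponding hardware TLB entry.
 */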
54352118Smckusick 
54452118Smckusick /*
54552118Smckusick  *	pmap_page_protect:
54652118Smckusick  *
54752118Smckusick  *	Lower the permission for all mappings to a given page.
54852118Smckusick  */
54952118Smckusick void
55052118Smckusick pmap_page_protect(pa, prot)
55152118Smckusick 	vm_offset_t pa;
55252118Smckusick 	vm_prot_t prot;
55352118Smckusick {
55452118Smckusick 	register pv_entry_t pv;
55552118Smckusick 	register vm_offset_t va;
55652118Smckusick 	int s;
55752118Smckusick 
55852118Smckusick #ifdef DEBUG
55952118Smckusick 	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
56052118Smckusick 	    prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE))
56152118Smckusick 		printf("pmap_page_protect(%x, %x)\n", pa, prot);
56252118Smckusick #endif
56352118Smckusick 	if (!IS_VM_PHYSADDR(pa))
56452118Smckusick 		return;
56552118Smckusick 
56652118Smckusick 	switch (prot) {
56752118Smckusick 	case VM_PROT_ALL:
56852118Smckusick 		break;
56952118Smckusick 
57052118Smckusick 	/* copy_on_write */
57152118Smckusick 	case VM_PROT_READ:
57252118Smckusick 	case VM_PROT_READ|VM_PROT_EXECUTE:
57352118Smckusick 		pv = pa_to_pvh(pa);
57452118Smckusick 		s = splimp();
57552118Smckusick 		/*
57652118Smckusick 		 * Loop over all current mappings, setting/clearing as appropriate.
57752118Smckusick 		 */
57852118Smckusick 		if (pv->pv_pmap != NULL) {
57952118Smckusick 			for (; pv; pv = pv->pv_next) {
58052118Smckusick 				extern vm_offset_t pager_sva, pager_eva;
58152118Smckusick 				va = pv->pv_va;
58252118Smckusick 
58352118Smckusick 				/*
58452118Smckusick 				 * XXX don't write protect pager mappings
58552118Smckusick 				 */
58652118Smckusick 				if (va >= pager_sva && va < pager_eva)
58752118Smckusick 					continue;
58852118Smckusick 				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
58952118Smckusick 					prot);
59052118Smckusick 			}
59152118Smckusick 		}
59252118Smckusick 		splx(s);
59352118Smckusick 		break;
59452118Smckusick 
59552118Smckusick 	/* remove_all */
59652118Smckusick 	default:
59752118Smckusick 		pv = pa_to_pvh(pa);
59852118Smckusick 		s = splimp();
59952118Smckusick 		while (pv->pv_pmap != NULL) {
60052118Smckusick 			pmap_remove(pv->pv_pmap, pv->pv_va,
60152118Smckusick 				    pv->pv_va + PAGE_SIZE);
60252118Smckusick 		}
60352118Smckusick 		splx(s);
60452118Smckusick 	}
60552118Smckusick }
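
/*
 * Illustrative sketch (editor's addition): how the VM system typically
 * drives pmap_page_protect() for a managed physical page pa.
 *
 *	pmap_page_protect(pa, VM_PROT_READ);	write-protect every mapping
 *						(copy-on-write setup)
 *	pmap_page_protect(pa, VM_PROT_NONE);	remove every mapping
 *						(page being reclaimed)
 */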
60652118Smckusick 
60752118Smckusick /*
60852118Smckusick  *	Set the physical protection on the
60952118Smckusick  *	specified range of this map as requested.
61052118Smckusick  */
61152118Smckusick void
61252118Smckusick pmap_protect(pmap, sva, eva, prot)
61352118Smckusick 	register pmap_t pmap;
61452118Smckusick 	vm_offset_t sva, eva;
61552118Smckusick 	vm_prot_t prot;
61652118Smckusick {
61752118Smckusick 	register vm_offset_t va;
61852118Smckusick 	pmap_hash_t hp;
61952118Smckusick 	u_int p;
62052118Smckusick 
62152118Smckusick #ifdef DEBUG
62252118Smckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
62352118Smckusick 		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
62452118Smckusick #endif
62552118Smckusick 	if (pmap == NULL)
62652118Smckusick 		return;
62752118Smckusick 
62852118Smckusick 	/* anything in the software cache? */
62952118Smckusick 	if (pmap->pm_tlbpid < 0 || pmap->pm_hash == zero_pmap_hash)
63052118Smckusick 		return;
63152118Smckusick 
63252118Smckusick 	if (!(prot & VM_PROT_READ)) {
63352118Smckusick 		pmap_remove(pmap, sva, eva);
63452118Smckusick 		return;
63552118Smckusick 	}
63652118Smckusick 
63752118Smckusick 	if (!pmap->pm_hash) {
63852118Smckusick 		register pt_entry_t *pte;
63952118Smckusick 
64052118Smckusick 		/*
64152118Smckusick 		 * Change entries in kernel pmap.
64252118Smckusick 		 * This will trap if the page is writable (in order to set
64352118Smckusick 		 * the dirty bit) even if the dirty bit is already set. The
64452118Smckusick 		 * optimization isn't worth the effort since this code isn't
64552118Smckusick 		 * executed much. The common case is to make a user page
64652118Smckusick 		 * read-only.
64752118Smckusick 		 */
64852741Sralph #ifdef DIAGNOSTIC
64952741Sralph 		if (sva < VM_MIN_KERNEL_ADDRESS ||
65052741Sralph 		    eva > VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
65152741Sralph 			panic("pmap_protect");
65252741Sralph #endif
65352118Smckusick 		p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
65452118Smckusick 		pte = kvtopte(sva);
65552118Smckusick 		for (va = sva; va < eva; va += NBPG, pte++) {
65652118Smckusick 			if (!(pte->pt_entry & PG_V))
65752118Smckusick 				continue;
65852118Smckusick 			pte->pt_entry = (pte->pt_entry & ~(PG_M | PG_RO)) | p;
65952118Smckusick 			/*
66052118Smckusick 			 * Update the TLB if the given address is in the cache.
66152118Smckusick 			 */
66252118Smckusick 			MachTLBUpdate(va, pte->pt_entry);
66352118Smckusick 		}
66452118Smckusick 		return;
66552118Smckusick 	}
66652118Smckusick 
66752118Smckusick 	p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;
66852118Smckusick 	va = sva | (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
66952118Smckusick 	eva |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
67052118Smckusick 	/*
67152118Smckusick 	 * If we are not in the current address space, just flush the
67252118Smckusick 	 * software cache and not the hardware.
67352118Smckusick 	 */
67452118Smckusick 	if (pmap != cur_pmap) {
67552118Smckusick 		for (; va < eva; va += NBPG) {
67652118Smckusick 			hp = &pmap->pm_hash[PMAP_HASH(va)];
67752118Smckusick 			if (hp->high != va)
67852118Smckusick 				continue;
67952118Smckusick 
68052118Smckusick 			hp->low = (hp->low & ~(PG_M | PG_RO)) | p;
68152118Smckusick 			pmap->pm_flags |= PM_MODIFIED;
68252118Smckusick 		}
68352118Smckusick 		return;
68452118Smckusick 	}
68552118Smckusick 
68652118Smckusick 	for (; va < eva; va += NBPG) {
68752118Smckusick 		hp = &pmap->pm_hash[PMAP_HASH(va)];
68852118Smckusick 		if (hp->high != va)
68952118Smckusick 			continue;
69052118Smckusick 
69152118Smckusick 		hp->low = (hp->low & ~(PG_M | PG_RO)) | p;
69252118Smckusick 		/*
69352118Smckusick 		 * Update the TLB if the given address is in the cache.
69452118Smckusick 		 */
69552118Smckusick 		MachTLBUpdate(hp->high, hp->low);
69652118Smckusick 	}
69752118Smckusick }
69852118Smckusick 
69952118Smckusick /*
70052118Smckusick  *	Insert the given physical page (p) at
70152118Smckusick  *	the specified virtual address (v) in the
70252118Smckusick  *	target physical map with the protection requested.
70352118Smckusick  *
70452118Smckusick  *	If specified, the page will be wired down, meaning
70552118Smckusick  *	that the related pte cannot be reclaimed.
70652118Smckusick  *
70752118Smckusick  *	NB:  This is the only routine which MAY NOT lazy-evaluate
70852118Smckusick  *	or lose information.  That is, this routine must actually
70952118Smckusick  *	insert this page into the given map NOW.
71052118Smckusick  */
71152118Smckusick void
71252118Smckusick pmap_enter(pmap, va, pa, prot, wired)
71352118Smckusick 	register pmap_t pmap;
71452118Smckusick 	vm_offset_t va;
71552118Smckusick 	register vm_offset_t pa;
71652118Smckusick 	vm_prot_t prot;
71752118Smckusick 	boolean_t wired;
71852118Smckusick {
71952118Smckusick 	register pmap_hash_t hp;
72052118Smckusick 	register u_int npte;
72152118Smckusick 	register int i;
72252118Smckusick 
72352118Smckusick #ifdef DEBUG
72452118Smckusick 	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
72552118Smckusick 		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
72652118Smckusick 		       pmap, va, pa, prot, wired);
72752118Smckusick #endif
72852118Smckusick #ifdef DIAGNOSTIC
72952118Smckusick 	if (!pmap)
73052118Smckusick 		panic("pmap_enter: pmap");
73152118Smckusick 	if (pmap->pm_tlbpid < 0)
73252118Smckusick 		panic("pmap_enter: tlbpid");
73352937Sralph 	if (!pmap->pm_hash) {
73452118Smckusick 		enter_stats.kernel++;
73552937Sralph 		if (va < VM_MIN_KERNEL_ADDRESS ||
73652937Sralph 		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
73752118Smckusick 			panic("pmap_enter: kva");
73852118Smckusick 	} else {
73952118Smckusick 		enter_stats.user++;
74052118Smckusick 		if (va & 0x80000000)
74152118Smckusick 			panic("pmap_enter: uva");
74252118Smckusick 	}
74352741Sralph 	if (pa & 0x80000000)
74452741Sralph 		panic("pmap_enter: pa");
74552118Smckusick 	if (!(prot & VM_PROT_READ))
74652118Smckusick 		panic("pmap_enter: prot");
74752118Smckusick #endif
74852118Smckusick 
74952118Smckusick 	/*
75052118Smckusick 	 * See if we need to create a new TLB cache.
75152118Smckusick 	 */
75252118Smckusick 	if (pmap->pm_hash == zero_pmap_hash) {
75352118Smckusick 		register vm_offset_t kva;
75452118Smckusick 		register pt_entry_t *pte;
75552118Smckusick 
75652118Smckusick 		kva = kmem_alloc(kernel_map, PMAP_HASH_SIZE);
75752118Smckusick 		pmap->pm_hash = (pmap_hash_t)kva;
75852118Smckusick 
75952118Smckusick 		/*
76052118Smckusick 		 * Convert the kernel virtual address to a physical one
76152118Smckusick 		 * and cache it in the pmap. Note: if the physical address
76252118Smckusick 		 * can change (due to memory compaction in kmem_alloc?),
76352118Smckusick 		 * we will have to update things.
76452118Smckusick 		 */
76552118Smckusick 		pte = kvtopte(kva);
76652118Smckusick 		for (i = 0; i < PMAP_HASH_UPAGES; i++) {
76752118Smckusick 			pmap->pm_hash_ptes[i] = pte->pt_entry & ~PG_G;
76852118Smckusick 			pte++;
76952118Smckusick 		}
77052118Smckusick 
77152118Smckusick 		/*
77252118Smckusick 		 * Map in new TLB cache if it is current.
77352118Smckusick 		 */
77452118Smckusick 		if (pmap == cur_pmap) {
77552118Smckusick 			for (i = 0; i < PMAP_HASH_UPAGES; i++) {
77652118Smckusick 				MachTLBWriteIndexed(i + UPAGES,
77752118Smckusick 					(PMAP_HASH_UADDR + (i << PGSHIFT)) |
77852118Smckusick 						(pmap->pm_tlbpid  <<
77952118Smckusick 						VMMACH_TLB_PID_SHIFT),
78052118Smckusick 					pmap->pm_hash_ptes[i]);
78152118Smckusick 			}
78252118Smckusick 		}
78352118Smckusick #ifdef DIAGNOSTIC
78452118Smckusick 		for (i = 0; i < PAGE_SIZE; i += sizeof(int), kva += sizeof(int))
78552118Smckusick 			if (*(int *)kva != 0)
78652118Smckusick 				panic("pmap_enter: *kva != 0");
78752118Smckusick #endif
78852118Smckusick 	}
78952118Smckusick 
79052118Smckusick 	if (IS_VM_PHYSADDR(pa)) {
79152118Smckusick 		register pv_entry_t pv, npv;
79252118Smckusick 		int s;
79352118Smckusick 
79452118Smckusick 		if (!(prot & VM_PROT_WRITE))
79552118Smckusick 			npte = PG_RO;
79652118Smckusick 		else {
79752118Smckusick 			register vm_page_t mem;
79852118Smckusick 
79952118Smckusick 			mem = PHYS_TO_VM_PAGE(pa);
80052118Smckusick 			if ((int)va < 0) {
80152118Smckusick 				/*
80252118Smckusick 				 * Don't bother to trap on kernel writes,
80352118Smckusick 				 * just record page as dirty.
80452118Smckusick 				 */
80552118Smckusick 				npte = PG_M;
806*53611Sralph 				mem->clean = FALSE;
80752118Smckusick 			} else
80852118Smckusick #ifdef ATTR
80952118Smckusick 				if ((pmap_attributes[atop(pa - KERNBASE)] &
81052118Smckusick 				    PMAP_ATTR_MOD) || !mem->clean)
81152118Smckusick #else
81252118Smckusick 				if (!mem->clean)
81352118Smckusick #endif
81452118Smckusick 					npte = PG_M;
81552118Smckusick 			else
81652118Smckusick 				npte = 0;
81752118Smckusick 		}
81852118Smckusick 
81952118Smckusick #ifdef DEBUG
82052118Smckusick 		enter_stats.managed++;
82152118Smckusick #endif
82252118Smckusick 		/*
82352118Smckusick 		 * Enter the pmap and virtual address into the
82452118Smckusick 		 * physical-to-virtual map table.
82552118Smckusick 		 */
82652118Smckusick 		pv = pa_to_pvh(pa);
82752118Smckusick 		s = splimp();
82852118Smckusick #ifdef DEBUG
82952118Smckusick 		if (pmapdebug & PDB_ENTER)
83052118Smckusick 			printf("pmap_enter: pv %x: was %x/%x/%x\n",
83152118Smckusick 			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
83252118Smckusick #endif
83352118Smckusick 		if (pv->pv_pmap == NULL) {
83452118Smckusick 			/*
83552118Smckusick 			 * No entries yet, use header as the first entry
83652118Smckusick 			 */
83752118Smckusick #ifdef DEBUG
83852118Smckusick 			enter_stats.firstpv++;
83952118Smckusick #endif
84052118Smckusick 			pv->pv_va = va;
84152118Smckusick 			pv->pv_pmap = pmap;
84252118Smckusick 			pv->pv_next = NULL;
84352118Smckusick 		} else {
84452118Smckusick 			/*
84552118Smckusick 			 * There is at least one other VA mapping this page.
84652118Smckusick 			 * Place this entry after the header.
84752118Smckusick 			 *
84852118Smckusick 			 * Note: the entry may already be in the table if
84952118Smckusick 			 * we are only changing the protection bits.
85052118Smckusick 			 */
85152118Smckusick 			for (npv = pv; npv; npv = npv->pv_next)
85252118Smckusick 				if (pmap == npv->pv_pmap && va == npv->pv_va) {
85352118Smckusick #ifdef DIAGNOSTIC
85452118Smckusick 				    if (!pmap->pm_hash) {
85552118Smckusick 					unsigned entry;
85652118Smckusick 
85752118Smckusick 					entry = kvtopte(va)->pt_entry;
85852118Smckusick 					if (!(entry & PG_V) ||
85952118Smckusick 					    (entry & PG_FRAME) != pa)
86052118Smckusick 			printf("found kva %x pa %x in pv_table but != %x\n",
86152118Smckusick 				va, pa, entry);
86252118Smckusick 				    } else {
86352118Smckusick 					hp = &pmap->pm_hash[PMAP_HASH(va)];
86452118Smckusick 					if (hp->high != (va |
86552118Smckusick 					    (pmap->pm_tlbpid <<
86652118Smckusick 					    VMMACH_TLB_PID_SHIFT)) ||
86752118Smckusick 					    (hp->low & PG_FRAME) != pa)
86852118Smckusick 			printf("found va %x pa %x in pv_table but != %x %x\n",
86952118Smckusick 				va, pa, hp->high, hp->low);
87052118Smckusick 				    }
87152118Smckusick #endif
87252118Smckusick 					goto fnd;
87352118Smckusick 				}
87452118Smckusick 			/* can this cause us to recurse forever? */
87552118Smckusick 			npv = (pv_entry_t)
87652118Smckusick 				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
87752118Smckusick 			npv->pv_va = va;
87852118Smckusick 			npv->pv_pmap = pmap;
87952118Smckusick 			npv->pv_next = pv->pv_next;
88052118Smckusick 			pv->pv_next = npv;
88152118Smckusick #ifdef DEBUG
88252118Smckusick 			if (!npv->pv_next)
88352118Smckusick 				enter_stats.secondpv++;
88452118Smckusick #endif
88552118Smckusick 		fnd:
88652118Smckusick 			;
88752118Smckusick 		}
88852118Smckusick 		splx(s);
88952118Smckusick 	} else {
89052118Smckusick 		/*
89152118Smckusick 		 * Assumption: if it is not part of our managed memory,
89252118Smckusick 		 * then it must be device memory, which may be volatile.
89352118Smckusick 		 */
89452118Smckusick #ifdef DEBUG
89552118Smckusick 		enter_stats.unmanaged++;
89652118Smckusick #endif
89752118Smckusick 		printf("pmap_enter: UNMANAGED ADDRESS va %x pa %x\n",
89852118Smckusick 			va, pa); /* XXX */
89952118Smckusick 		npte = (prot & VM_PROT_WRITE) ? PG_M : PG_RO;
90052118Smckusick 	}
90152118Smckusick 
90252741Sralph 	/*
90352741Sralph 	 * The only time we need to flush the cache is if we
90452741Sralph 	 * execute from a physical address and then change the data.
90552741Sralph 	 * This is the best place to do this.
90652741Sralph 	 * pmap_protect() and pmap_remove() are mostly used to switch
90752741Sralph 	 * between R/W and R/O pages.
90852741Sralph 	 * NOTE: we only support cache flush for read only text.
90952741Sralph 	 */
91052741Sralph 	if (prot == (VM_PROT_READ | VM_PROT_EXECUTE))
911*53611Sralph 		MachFlushICache(MACH_PHYS_TO_CACHED(pa), PAGE_SIZE);
91252741Sralph 
91352118Smckusick 	if (!pmap->pm_hash) {
91452118Smckusick 		register pt_entry_t *pte;
91552118Smckusick 
91652118Smckusick 		/* enter entries into kernel pmap */
91752118Smckusick 		pte = kvtopte(va);
91852118Smckusick 		npte |= pa | PG_V | PG_G;
91952118Smckusick 		if (wired) {
92052118Smckusick 			pmap->pm_stats.wired_count += pmaxpagesperpage;
92152118Smckusick 			npte |= PG_WIRED;
92252118Smckusick 		}
92352118Smckusick 		i = pmaxpagesperpage;
92452118Smckusick 		do {
92552118Smckusick 			if (!(pte->pt_entry & PG_V)) {
92652118Smckusick 				pmap->pm_stats.resident_count++;
92752118Smckusick 				MachTLBWriteRandom(va, npte);
92852118Smckusick 			} else {
92952937Sralph #ifdef DIAGNOSTIC
93052937Sralph 				if (pte->pt_entry & PG_WIRED)
93152937Sralph 					panic("pmap_enter: kernel wired");
93252937Sralph #endif
93352118Smckusick 				/*
93452118Smckusick 				 * Update the same virtual address entry.
93552118Smckusick 				 */
93652118Smckusick 				MachTLBUpdate(va, npte);
93752937Sralph 				printf("TLB update kva %x pte %x -> %x\n",
93852937Sralph 					va, pte->pt_entry, npte); /* XXX */
93952118Smckusick 			}
94052118Smckusick 			pte->pt_entry = npte;
94152118Smckusick 			va += NBPG;
94252118Smckusick 			npte += NBPG;
94352118Smckusick 			pte++;
94452118Smckusick 		} while (--i != 0);
94552118Smckusick 		return;
94652118Smckusick 	}
94752118Smckusick 
94852118Smckusick 	/*
94952118Smckusick 	 * Now validate mapping with desired protection/wiring.
95052118Smckusick 	 * Assume uniform modified and referenced status for all
95152118Smckusick 	 * PMAX pages in a MACH page.
95252118Smckusick 	 */
95352118Smckusick 	npte |= pa | PG_V;
95452118Smckusick 	if (wired) {
95552118Smckusick 		pmap->pm_stats.wired_count += pmaxpagesperpage;
95652118Smckusick 		npte |= PG_WIRED;
95752118Smckusick 	}
95852118Smckusick #ifdef DEBUG
95952118Smckusick 	if (pmapdebug & PDB_ENTER)
96052118Smckusick 		printf("pmap_enter: new pte value %x\n", npte);
96152118Smckusick #endif
96252118Smckusick 	va |= (pmap->pm_tlbpid << VMMACH_TLB_PID_SHIFT);
96352118Smckusick 	i = pmaxpagesperpage;
96452118Smckusick 	do {
96552118Smckusick 		hp = &pmap->pm_hash[PMAP_HASH(va)];
96652118Smckusick 		if (!hp->high) {
96752118Smckusick 			pmap->pm_stats.resident_count++;
96852118Smckusick 			hp->high = va;
96952118Smckusick 			hp->low = npte;
97052118Smckusick 			MachTLBWriteRandom(va, npte);
97152118Smckusick 		} else {
97252118Smckusick #ifdef DEBUG
97352118Smckusick 			enter_stats.cachehit++;
97452118Smckusick #endif
97552937Sralph 			if (!(hp->low & PG_WIRED)) {
97652937Sralph 				if (hp->high == va &&
97752937Sralph 				    (hp->low & PG_FRAME) == (npte & PG_FRAME)) {
97852937Sralph 					/*
97952937Sralph 					 * Update the same entry.
98052937Sralph 					 */
98152937Sralph 					hp->low = npte;
98252937Sralph 					MachTLBUpdate(va, npte);
98352937Sralph 				} else {
98452937Sralph 					MachTLBFlushAddr(hp->high);
98552937Sralph 					pmap_remove_pv(pmap,
98652937Sralph 						hp->high & PG_FRAME,
98752937Sralph 						hp->low & PG_FRAME);
98852937Sralph 					hp->high = va;
98952937Sralph 					hp->low = npte;
99052937Sralph 					MachTLBWriteRandom(va, npte);
99152937Sralph 				}
99252118Smckusick 			} else {
99352118Smckusick 				/*
99452118Smckusick 				 * Don't replace wired entries, just update
99552118Smckusick 				 * the hardware TLB.
99652118Smckusick 				 * Bug: routines to flush the TLB won't know
99752118Smckusick 				 * that the entry is in the hardware.
99852118Smckusick 				 */
99952118Smckusick 				printf("pmap_enter: wired va %x %x\n", va,
100052118Smckusick 					hp->low); /* XXX */
100152118Smckusick 				panic("pmap_enter: wired"); /* XXX */
100252118Smckusick 				MachTLBWriteRandom(va, npte);
100352118Smckusick 			}
100452118Smckusick 		}
100552118Smckusick 		va += NBPG;
100652118Smckusick 		npte += NBPG;
100752118Smckusick 	} while (--i != 0);
100852118Smckusick }
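
/*
 * Illustrative sketch (editor's addition): a wired kernel mapping, with
 * hypothetical page-aligned va and pa. (kernel_pmap is assumed to name
 * &kernel_pmap_store, as in the machine-independent VM code.)
 *
 *	pmap_enter(kernel_pmap, va, pa, VM_PROT_READ | VM_PROT_WRITE, TRUE);
 *
 * For kernel addresses this takes the !pm_hash path above: the PTE is
 * written directly and marked PG_G (global, matching any PID), and a
 * writable managed page is pre-dirtied with PG_M so kernel stores never
 * take a modify trap.
 */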
100952118Smckusick 
101052118Smckusick /*
101152118Smckusick  *	Routine:	pmap_change_wiring
101252118Smckusick  *	Function:	Change the wiring attribute for a map/virtual-address
101352118Smckusick  *			pair.
101452118Smckusick  *	In/out conditions:
101552118Smckusick  *			The mapping must already exist in the pmap.
101652118Smckusick  */
101752118Smckusick void
101852118Smckusick pmap_change_wiring(pmap, va, wired)
101952118Smckusick 	register pmap_t	pmap;
102052118Smckusick 	vm_offset_t va;
102152118Smckusick 	boolean_t wired;
102252118Smckusick {
102352118Smckusick 	register pmap_hash_t hp;
102452118Smckusick 	u_int p;
102552118Smckusick 	int i;
102652118Smckusick 
102752118Smckusick #ifdef DEBUG
102852118Smckusick 	if (pmapdebug & PDB_FOLLOW)
102952118Smckusick 		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
103052118Smckusick #endif
103152118Smckusick 	if (pmap == NULL)
103252118Smckusick 		return;
103352118Smckusick 
103452118Smckusick 	p = wired ? PG_WIRED : 0;
103552118Smckusick 
103652118Smckusick 	/*
103752118Smckusick 	 * Don't need to flush the TLB since PG_WIRED is only in software.
103852118Smckusick 	 */
103952118Smckusick 	if (!pmap->pm_hash) {
104052118Smckusick 		register pt_entry_t *pte;
104152118Smckusick 
104252118Smckusick 		/* change entries in kernel pmap */
104352741Sralph #ifdef DIAGNOSTIC
104452741Sralph 		if (va < VM_MIN_KERNEL_ADDRESS ||
104552741Sralph 		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
104652741Sralph 			panic("pmap_change_wiring");
104752741Sralph #endif
104852118Smckusick 		pte = kvtopte(va);
104952118Smckusick 		i = pmaxpagesperpage;
105052118Smckusick 		if (!(pte->pt_entry & PG_WIRED) && p)
105152118Smckusick 			pmap->pm_stats.wired_count += i;
105252118Smckusick 		else if ((pte->pt_entry & PG_WIRED) && !p)
105352118Smckusick 			pmap->pm_stats.wired_count -= i;
105452118Smckusick 		do {
105552118Smckusick 			if (!(pte->pt_entry & PG_V))
105652118Smckusick 				continue;
105752118Smckusick 			pte->pt_entry = (pte->pt_entry & ~PG_WIRED) | p;
105852118Smckusick 			pte++;
105952118Smckusick 		} while (--i != 0);
106052118Smckusick 	} else if (pmap->pm_tlbpid >= 0 && pmap->pm_hash != zero_pmap_hash) {
106152118Smckusick 		i = pmaxpagesperpage;
106252118Smckusick 		do {
106352118Smckusick 			hp = &pmap->pm_hash[PMAP_HASH(va)];
106452118Smckusick 			if (!hp->high)
106552118Smckusick 				continue;
106652118Smckusick 			if (!(hp->low & PG_WIRED) && p)
106752118Smckusick 				pmap->pm_stats.wired_count++;
106852118Smckusick 			else if ((hp->low & PG_WIRED) && !p)
106952118Smckusick 				pmap->pm_stats.wired_count--;
107052118Smckusick 			hp->low = (hp->low & ~PG_WIRED) | p;
107152118Smckusick 			va += NBPG;
107252118Smckusick 		} while (--i != 0);
107352118Smckusick 	}
107452118Smckusick }
107552118Smckusick 
107652118Smckusick /*
107752118Smckusick  *	Routine:	pmap_extract
107852118Smckusick  *	Function:
107952118Smckusick  *		Extract the physical page address associated
108052118Smckusick  *		with the given map/virtual_address pair.
108152118Smckusick  */
108252118Smckusick vm_offset_t
108352118Smckusick pmap_extract(pmap, va)
108452118Smckusick 	register pmap_t	pmap;
108552118Smckusick 	vm_offset_t va;
108652118Smckusick {
108752118Smckusick 	register vm_offset_t pa;
108852118Smckusick 	register pmap_hash_t hp;
108952118Smckusick 
109052118Smckusick #ifdef DEBUG
109152118Smckusick 	if (pmapdebug & PDB_FOLLOW)
109252118Smckusick 		printf("pmap_extract(%x, %x) -> ", pmap, va);
109352118Smckusick #endif
109452118Smckusick 
109552741Sralph 	if (!pmap->pm_hash) {
109652741Sralph #ifdef DIAGNOSTIC
109752741Sralph 		if (va < VM_MIN_KERNEL_ADDRESS ||
109852741Sralph 		    va >= VM_MIN_KERNEL_ADDRESS + PMAP_HASH_KPAGES*NPTEPG*NBPG)
109952741Sralph 			panic("pmap_extract");
110052741Sralph #endif
110152118Smckusick 		pa = kvtopte(va)->pt_entry & PG_FRAME;
110252741Sralph 	} else if (pmap->pm_tlbpid >= 0) {
110352118Smckusick 		hp = &pmap->pm_hash[PMAP_HASH(va)];
110452118Smckusick 		if (hp->high)
110552118Smckusick 			pa = hp->low & PG_FRAME;
110652118Smckusick 		else
110752118Smckusick 			pa = 0;
110852118Smckusick 	} else
110952118Smckusick 		pa = 0;
111052118Smckusick 
111152118Smckusick #ifdef DEBUG
111252118Smckusick 	if (pmapdebug & PDB_FOLLOW)
111352118Smckusick 		printf("%x\n", pa);
111452118Smckusick #endif
111552741Sralph 	return (pa);
111652118Smckusick }
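
/*
 * Illustrative sketch (editor's addition): pmap_extract() returns the
 * page frame only, so a caller wanting a full physical address must add
 * the page offset back in (PGOFSET being the page-offset mask):
 *
 *	vm_offset_t pa;
 *
 *	pa = pmap_extract(pm, va);
 *	if (pa != 0)
 *		pa |= va & PGOFSET;
 */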
111752118Smckusick 
111852118Smckusick /*
111952118Smckusick  *	Copy the range specified by src_addr/len
112052118Smckusick  *	from the source map to the range dst_addr/len
112152118Smckusick  *	in the destination map.
112252118Smckusick  *
112352118Smckusick  *	This routine is only advisory and need not do anything.
112452118Smckusick  */
112552741Sralph void
112652741Sralph pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
112752118Smckusick 	pmap_t dst_pmap;
112852118Smckusick 	pmap_t src_pmap;
112952118Smckusick 	vm_offset_t dst_addr;
113052118Smckusick 	vm_size_t len;
113152118Smckusick 	vm_offset_t src_addr;
113252118Smckusick {
113352118Smckusick 
113452118Smckusick #ifdef DEBUG
113552118Smckusick 	if (pmapdebug & PDB_FOLLOW)
113652118Smckusick 		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
113752118Smckusick 		       dst_pmap, src_pmap, dst_addr, len, src_addr);
113852118Smckusick #endif
113952118Smckusick }
114052118Smckusick 
114152118Smckusick /*
114252118Smckusick  *	Require that all active physical maps contain no
114352118Smckusick  *	incorrect entries NOW.  [This update includes
114452118Smckusick  *	forcing updates of any address map caching.]
114552118Smckusick  *
114652118Smckusick  *	Generally used to ensure that a thread about
114752118Smckusick  *	to run will see a semantically correct world.
114852118Smckusick  */
114952741Sralph void
115052741Sralph pmap_update()
115152118Smckusick {
115252118Smckusick 
115352118Smckusick #ifdef DEBUG
115452118Smckusick 	if (pmapdebug & PDB_FOLLOW)
115552118Smckusick 		printf("pmap_update()\n");
115652118Smckusick #endif
115752118Smckusick }
115852118Smckusick 
115952118Smckusick /*
116052118Smckusick  *	Routine:	pmap_collect
116152118Smckusick  *	Function:
116252118Smckusick  *		Garbage collects the physical map system for
116352118Smckusick  *		pages which are no longer used.
116452118Smckusick  *		Success need not be guaranteed -- that is, there
116552118Smckusick  *		may well be pages which are not referenced, but
116652118Smckusick  *		others may be collected.
116752118Smckusick  *	Usage:
116852118Smckusick  *		Called by the pageout daemon when pages are scarce.
116952118Smckusick  */
117052118Smckusick void
117152118Smckusick pmap_collect(pmap)
117252118Smckusick 	pmap_t pmap;
117352118Smckusick {
117452118Smckusick 
117552118Smckusick #ifdef DEBUG
117652118Smckusick 	if (pmapdebug & PDB_FOLLOW)
117752118Smckusick 		printf("pmap_collect(%x)\n", pmap);
117852118Smckusick #endif
117952118Smckusick }
118052118Smckusick 
118152118Smckusick /*
118252118Smckusick  *	pmap_zero_page zeros the specified (machine independent)
118352118Smckusick  *	page.
118452118Smckusick  */
118552118Smckusick void
118652118Smckusick pmap_zero_page(phys)
118752741Sralph 	vm_offset_t phys;
118852118Smckusick {
118952741Sralph 	register int *p, *end;
119052118Smckusick 
119152118Smckusick #ifdef DEBUG
119252118Smckusick 	if (pmapdebug & PDB_FOLLOW)
119352118Smckusick 		printf("pmap_zero_page(%x)\n", phys);
119452118Smckusick #endif
119552741Sralph 	p = (int *)MACH_PHYS_TO_CACHED(phys);
119652741Sralph 	end = p + PAGE_SIZE / sizeof(int);
119752118Smckusick 	do {
119852741Sralph 		p[0] = 0;
119952741Sralph 		p[1] = 0;
120052741Sralph 		p[2] = 0;
120152741Sralph 		p[3] = 0;
120252741Sralph 		p += 4;
120352741Sralph 	} while (p != end);
120452118Smckusick }
120552118Smckusick 
120652118Smckusick /*
120752118Smckusick  *	pmap_copy_page copies the specified (machine independent)
120852118Smckusick  *	page.
120952118Smckusick  */
121052118Smckusick void
121152118Smckusick pmap_copy_page(src, dst)
121252741Sralph 	vm_offset_t src, dst;
121352118Smckusick {
121452741Sralph 	register int *s, *d, *end;
121552741Sralph 	register int tmp0, tmp1, tmp2, tmp3;
121652118Smckusick 
121752118Smckusick #ifdef DEBUG
121852118Smckusick 	if (pmapdebug & PDB_FOLLOW)
121952118Smckusick 		printf("pmap_copy_page(%x, %x)\n", src, dst);
122052118Smckusick #endif
122152741Sralph 	s = (int *)MACH_PHYS_TO_CACHED(src);
122252741Sralph 	d = (int *)MACH_PHYS_TO_CACHED(dst);
122352741Sralph 	end = s + PAGE_SIZE / sizeof(int);
122452118Smckusick 	do {
122552741Sralph 		tmp0 = s[0];
122652741Sralph 		tmp1 = s[1];
122752741Sralph 		tmp2 = s[2];
122852741Sralph 		tmp3 = s[3];
122952741Sralph 		d[0] = tmp0;
123052741Sralph 		d[1] = tmp1;
123152741Sralph 		d[2] = tmp2;
123252741Sralph 		d[3] = tmp3;
123352741Sralph 		s += 4;
123452741Sralph 		d += 4;
123552741Sralph 	} while (s != end);
123652118Smckusick }
123752118Smckusick 
123852118Smckusick /*
123952118Smckusick  *	Routine:	pmap_pageable
124052118Smckusick  *	Function:
124152118Smckusick  *		Make the specified pages (by pmap, offset)
124252118Smckusick  *		pageable (or not) as requested.
124352118Smckusick  *
124452118Smckusick  *		A page which is not pageable may not take
124552118Smckusick  *		a fault; therefore, its page table entry
124652118Smckusick  *		must remain valid for the duration.
124752118Smckusick  *
124852118Smckusick  *		This routine is merely advisory; pmap_enter
124952118Smckusick  *		will specify that these pages are to be wired
125052118Smckusick  *		down (or not) as appropriate.
125152118Smckusick  */
125252118Smckusick void
125352118Smckusick pmap_pageable(pmap, sva, eva, pageable)
125452118Smckusick 	pmap_t		pmap;
125552118Smckusick 	vm_offset_t	sva, eva;
125652118Smckusick 	boolean_t	pageable;
125752118Smckusick {
125852118Smckusick 
125952118Smckusick #ifdef DEBUG
126052118Smckusick 	if (pmapdebug & PDB_FOLLOW)
126152118Smckusick 		printf("pmap_pageable(%x, %x, %x, %x)\n",
126252118Smckusick 		       pmap, sva, eva, pageable);
126352118Smckusick #endif
126452118Smckusick }
126552118Smckusick 
126652118Smckusick /*
126752118Smckusick  *	Clear the modify bits on the specified physical page.
126852118Smckusick  */
126952118Smckusick void
127052118Smckusick pmap_clear_modify(pa)
127152118Smckusick 	vm_offset_t pa;
127252118Smckusick {
127352118Smckusick 	pmap_hash_t hp;
127452118Smckusick 
127552118Smckusick #ifdef DEBUG
127652118Smckusick 	if (pmapdebug & PDB_FOLLOW)
127752118Smckusick 		printf("pmap_clear_modify(%x)\n", pa);
127852118Smckusick #endif
127952118Smckusick #ifdef ATTR
128052118Smckusick 	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_MOD;
128152118Smckusick #endif
128252118Smckusick }
128352118Smckusick 
128452118Smckusick /*
128552118Smckusick  *	pmap_clear_reference:
128652118Smckusick  *
128752118Smckusick  *	Clear the reference bit on the specified physical page.
128852118Smckusick  */
128952118Smckusick void
129052118Smckusick pmap_clear_reference(pa)
129152118Smckusick 	vm_offset_t pa;
129252118Smckusick {
129352118Smckusick 
129452118Smckusick #ifdef DEBUG
129552118Smckusick 	if (pmapdebug & PDB_FOLLOW)
129652118Smckusick 		printf("pmap_clear_reference(%x)\n", pa);
129752118Smckusick #endif
129852118Smckusick #ifdef ATTR
129952118Smckusick 	pmap_attributes[atop(pa - KERNBASE)] &= ~PMAP_ATTR_REF;
130052118Smckusick #endif
130152118Smckusick }
130252118Smckusick 
130352118Smckusick /*
130452118Smckusick  *	pmap_is_referenced:
130552118Smckusick  *
130652118Smckusick  *	Return whether or not the specified physical page is referenced
130752118Smckusick  *	by any physical maps.
130852118Smckusick  */
130952118Smckusick boolean_t
131052118Smckusick pmap_is_referenced(pa)
131152118Smckusick 	vm_offset_t pa;
131252118Smckusick {
131352118Smckusick #ifdef ATTR
131452741Sralph 	return (pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_REF);
131552118Smckusick #else
131652741Sralph 	return (FALSE);
131752118Smckusick #endif
131852118Smckusick }
131952118Smckusick 
132052118Smckusick /*
132152118Smckusick  *	pmap_is_modified:
132252118Smckusick  *
132352118Smckusick  *	Return whether or not the specified physical page is modified
132452118Smckusick  *	by any physical maps.
132552118Smckusick  */
132652118Smckusick boolean_t
132752118Smckusick pmap_is_modified(pa)
132852118Smckusick 	vm_offset_t pa;
132952118Smckusick {
133052118Smckusick #ifdef ATTR
133152741Sralph 	return (pmap_attributes[atop(pa - KERNBASE)] & PMAP_ATTR_MOD);
133252118Smckusick #else
133352741Sralph 	return (FALSE);
133452118Smckusick #endif
133552118Smckusick }
133652118Smckusick 
133752118Smckusick vm_offset_t
133852118Smckusick pmap_phys_address(ppn)
133952118Smckusick 	int ppn;
134052118Smckusick {
134152118Smckusick 
134252118Smckusick #ifdef DEBUG
134352118Smckusick 	if (pmapdebug & PDB_FOLLOW)
134452118Smckusick 		printf("pmap_phys_address(%x)\n", ppn);
134552118Smckusick #endif
134652118Smckusick 	panic("pmap_phys_address"); /* XXX */
134752741Sralph 	return (pmax_ptob(ppn));
134852118Smckusick }
134952118Smckusick 
135052118Smckusick /*
135152118Smckusick  * Miscellaneous support routines
135252118Smckusick  */
135352118Smckusick 
135452118Smckusick /*
135552118Smckusick  * Allocate a hardware PID and return it.
135652118Smckusick  * Also, change the hardwired TLB entry to point to the current TLB cache.
135752118Smckusick  * This is called by swtch().
135852118Smckusick  */
135952118Smckusick int
136052118Smckusick pmap_alloc_tlbpid(p)
136152118Smckusick 	register struct proc *p;
136252118Smckusick {
136352118Smckusick 	register pmap_t pmap;
136452118Smckusick 	register u_int i;
136552118Smckusick 	register int id;
136652118Smckusick 
136752118Smckusick 	pmap = &p->p_vmspace->vm_pmap;
136852118Smckusick 	if ((id = pmap->pm_tlbpid) >= 0) {
136952118Smckusick 		if (pmap->pm_flags & PM_MODIFIED) {
137052118Smckusick 			pmap->pm_flags &= ~PM_MODIFIED;
137152118Smckusick 			MachTLBFlushPID(id);
137252118Smckusick 		}
137352118Smckusick 		goto done;
137452118Smckusick 	}
137552118Smckusick 
137652118Smckusick 	if ((i = whichpids[0]) != 0xFFFFFFFF)
137752118Smckusick 		id = 0;
137852118Smckusick 	else if ((i = whichpids[1]) != 0xFFFFFFFF)
137952118Smckusick 		id = 32;
138052118Smckusick 	else {
138152118Smckusick 		register struct proc *q;
138252118Smckusick 		register pmap_t q_pmap;
138352118Smckusick 
138452118Smckusick 		/*
138552118Smckusick 		 * Have to find a tlbpid to recycle.
138652118Smckusick 		 * There is probably a better way to do this.
138752118Smckusick 		 */
138852118Smckusick 		for (q = allproc; q != NULL; q = q->p_nxt) {
138952118Smckusick 			q_pmap = &q->p_vmspace->vm_pmap;
139052118Smckusick 			if ((id = q_pmap->pm_tlbpid) < 0)
139152118Smckusick 				continue;
139252118Smckusick 			if (q->p_stat != SRUN)
139352118Smckusick 				goto fnd;
139452118Smckusick 		}
139552118Smckusick 		if (id < 0)
139652118Smckusick 			panic("TLBPidAlloc");
139752118Smckusick 	fnd:
139852118Smckusick 		printf("pmap_alloc_tlbpid: recycle pid %d (%s) tlbpid %d\n",
139952118Smckusick 			q->p_pid, q->p_comm, id); /* XXX */
140052118Smckusick 		/*
140152118Smckusick 		 * Even though the virtual-to-physical mapping hasn't changed,
140252118Smckusick 		 * we need to clear the PID tag in the high entry of the cache.
140352118Smckusick 		 */
140452118Smckusick 		if (q_pmap->pm_hash != zero_pmap_hash) {
140552118Smckusick 			register pmap_hash_t hp;
140652118Smckusick 
140752118Smckusick 			hp = q_pmap->pm_hash;
140852118Smckusick 			for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
140952118Smckusick 				if (!hp->high)
141052118Smckusick 					continue;
141152118Smckusick 
141252118Smckusick 				if (hp->low & PG_WIRED) {
141352118Smckusick 					printf("Clearing wired user entry! h %x l %x\n", hp->high, hp->low);
141452118Smckusick 					panic("pmap_alloc_tlbpid: wired");
141552118Smckusick 				}
141653606Smckusick 				pmap_remove_pv(q_pmap, hp->high & PG_FRAME,
141752118Smckusick 					hp->low & PG_FRAME);
141852118Smckusick 				hp->high = 0;
141952118Smckusick 				q_pmap->pm_stats.resident_count--;
142052118Smckusick 			}
142152118Smckusick 		}
142252118Smckusick 		q_pmap->pm_tlbpid = -1;
142352118Smckusick 		MachTLBFlushPID(id);
142452118Smckusick #ifdef DEBUG
142552118Smckusick 		remove_stats.pidflushes++;
142652118Smckusick #endif
142752118Smckusick 		pmap->pm_tlbpid = id;
142852118Smckusick 		goto done;
142952118Smckusick 	}
143052118Smckusick 	while (i & 1) {
143152118Smckusick 		i >>= 1;
143252118Smckusick 		id++;
143352118Smckusick 	}
143452118Smckusick 	whichpids[id >> 5] |= 1 << (id & 0x1F);
143552118Smckusick 	pmap->pm_tlbpid = id;
143652118Smckusick done:
143752118Smckusick 	/*
143852118Smckusick 	 * Map in new TLB cache.
143952118Smckusick 	 */
144052118Smckusick 	if (pmap == cur_pmap)
144152118Smckusick 		return (id);
144252118Smckusick 	cur_pmap = pmap;
144352118Smckusick 	for (i = 0; i < PMAP_HASH_UPAGES; i++) {
144452118Smckusick 		MachTLBWriteIndexed(i + UPAGES,
144552118Smckusick 			(PMAP_HASH_UADDR + (i << PGSHIFT)) |
144652118Smckusick 				(id << VMMACH_TLB_PID_SHIFT),
144752118Smckusick 			pmap->pm_hash_ptes[i]);
144852118Smckusick 	}
144952118Smckusick 	return (id);
145052118Smckusick }
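
/*
 * Illustrative sketch (editor's addition): the find-first-zero scan
 * above, worked through. With whichpids[0] == 0x0000000F, the loop
 *
 *	while (i & 1) {
 *		i >>= 1;
 *		id++;
 *	}
 *
 * shifts past the four set bits and leaves id == 4, which is then
 * marked busy by whichpids[0] |= 1 << 4, giving 0x0000001F.
 */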
145152118Smckusick 
145252118Smckusick /*
145352118Smckusick  * Remove a physical-to-virtual address translation.
145452118Smckusick  */
145552118Smckusick void
145652118Smckusick pmap_remove_pv(pmap, va, pa)
145752118Smckusick 	pmap_t pmap;
145852118Smckusick 	vm_offset_t va, pa;
145952118Smckusick {
146052118Smckusick 	register pv_entry_t pv, npv;
146152118Smckusick 	int s;
146252118Smckusick 
146352118Smckusick #ifdef DEBUG
146452118Smckusick 	if (pmapdebug & PDB_FOLLOW)
146552118Smckusick 		printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
146652118Smckusick #endif
146752118Smckusick 	/*
146852118Smckusick 	 * Remove page from the PV table (raise IPL since we
146952118Smckusick 	 * may be called at interrupt time).
147052118Smckusick 	 */
147152118Smckusick 	if (!IS_VM_PHYSADDR(pa))
147252118Smckusick 		return;
147352118Smckusick 	pv = pa_to_pvh(pa);
147452118Smckusick 	s = splimp();
147552118Smckusick 	/*
147652118Smckusick 	 * If it is the first entry on the list, it is actually
147752118Smckusick 	 * in the header and we must copy the following entry up
147852118Smckusick 	 * to the header.  Otherwise we must search the list for
147952118Smckusick 	 * the entry.  In either case we free the now unused entry.
148052118Smckusick 	 */
148152118Smckusick 	if (pmap == pv->pv_pmap && va == pv->pv_va) {
148252118Smckusick 		npv = pv->pv_next;
148352118Smckusick 		if (npv) {
148452118Smckusick 			*pv = *npv;
148552118Smckusick 			free((caddr_t)npv, M_VMPVENT);
148652118Smckusick 		} else
148752118Smckusick 			pv->pv_pmap = NULL;
148852118Smckusick #ifdef DEBUG
148952118Smckusick 		remove_stats.pvfirst++;
149052118Smckusick #endif
149152118Smckusick 	} else {
149252118Smckusick 		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
149352118Smckusick #ifdef DEBUG
149452118Smckusick 			remove_stats.pvsearch++;
149552118Smckusick #endif
149652118Smckusick 			if (pmap == npv->pv_pmap && va == npv->pv_va)
149752118Smckusick 				goto fnd;
149852118Smckusick 		}
149952118Smckusick #ifdef DIAGNOSTIC
150052118Smckusick 		printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa);
150152118Smckusick 		panic("pmap_remove_pv");
150252118Smckusick #endif
150352118Smckusick 	fnd:
150452118Smckusick 		pv->pv_next = npv->pv_next;
150552118Smckusick 		free((caddr_t)npv, M_VMPVENT);
150652118Smckusick 	}
150752118Smckusick 	splx(s);
150852118Smckusick }
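
/*
 * Illustrative sketch (editor's addition): since the first pv_entry for
 * each page lives inside pv_table itself, removing the head entry copies
 * the second entry up instead of unlinking the head. Removing mapping A
 * from a chain A -> B -> C:
 *
 *	before:	pv_table[i]: {A} -> {B} -> {C}
 *	after:	pv_table[i]: {B} -> {C}		({B}'s old node is freed)
 */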
150952118Smckusick 
151052118Smckusick #ifdef DEBUG
151152118Smckusick pmap_print(pmap)
151252118Smckusick 	pmap_t pmap;
151352118Smckusick {
151452118Smckusick 	register pmap_hash_t hp;
151552118Smckusick 	register int i;
151652118Smckusick 
151752118Smckusick 	printf("\tpmap_print(%x)\n", pmap);
151852118Smckusick 
151952118Smckusick 	if (pmap->pm_hash == zero_pmap_hash) {
152052118Smckusick 		printf("pm_hash == zero\n");
152152118Smckusick 		return;
152252118Smckusick 	}
152352118Smckusick 	if (pmap->pm_hash == (pmap_hash_t)0) {
152452118Smckusick 		printf("pm_hash == kernel\n");
152552118Smckusick 		return;
152652118Smckusick 	}
152752118Smckusick 	hp = pmap->pm_hash;
152852118Smckusick 	for (i = 0; i < PMAP_HASH_NUM_ENTRIES; i++, hp++) {
152952118Smckusick 		if (!hp->high)
153052118Smckusick 			continue;
153152118Smckusick 		printf("%d: hi %x low %x\n", i, hp->high, hp->low);
153252118Smckusick 	}
153352118Smckusick }
153452118Smckusick #endif
1535