/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and Ralph Campbell.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)pmap.c	8.2 (Berkeley) 09/23/93
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/user.h>
#include <sys/buf.h>

#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

#include <machine/machConst.h>
#include <machine/pte.h>

extern vm_page_t vm_page_alloc1 __P((void));
extern void vm_page_free1 __P((vm_page_t));

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 * XXX really should do this as a part of the higher level code.
 */
typedef struct pv_entry {
	struct pv_entry	*pv_next;	/* next pv_entry */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vm_offset_t	pv_va;		/* virtual address for mapping */
} *pv_entry_t;

pv_entry_t	pv_table;	/* array of entries, one per page */
extern void	pmap_remove_pv();

#define pa_index(pa)		atop((pa) - first_phys_addr)
#define pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
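
/*
 * Sketch (illustration only, not compiled): walking the mappings of a
 * managed physical page.  The chain head lives in pv_table itself; an
 * empty chain is marked by a NULL pv_pmap in the head entry.
 *
 *	pv_entry_t pv = pa_to_pvh(pa);
 *	if (pv->pv_pmap != NULL)
 *		for (; pv != NULL; pv = pv->pv_next)
 *			... pv->pv_pmap maps pv->pv_va to pa ...
 */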

#ifdef DEBUG
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
	int cachehit;	/* new entry forced valid entry out */
} enter_stats;
struct {
	int calls;
	int removes;
	int flushes;
	int pidflushes;	/* HW pid stolen */
	int pvfirst;
	int pvsearch;
} remove_stats;

int pmapdebug;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_PVENTRY	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_TLBPID	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

#endif /* DEBUG */

struct pmap	kernel_pmap_store;

vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
int		pmaxpagesperpage;	/* PAGE_SIZE / NBPG */
#ifdef ATTR
char		*pmap_attributes;	/* reference and modify bits */
#endif
struct segtab	*free_segtab;		/* free list kept locally */
u_int		tlbpid_gen = 1;		/* TLB PID generation count */
int		tlbpid_cnt = 2;		/* next available TLB PID (0 is the
					   kernel's, 1 is proc0's) */
pt_entry_t	*Sysmap;		/* kernel pte table */
u_int		Sysmapsize;		/* number of pte's in Sysmap */

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	firstaddr is the first unused kseg0 address (not page aligned).
 */
void
pmap_bootstrap(firstaddr)
	vm_offset_t firstaddr;
{
	register int i;
	vm_offset_t start = firstaddr;
	extern int maxmem, physmem;

#define	valloc(name, type, num) \
	    (name) = (type *)firstaddr; firstaddr = (vm_offset_t)((name)+(num))
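	/*
	 * valloc() is a simple bump allocator: it carves 'num' objects
	 * of 'type' out of the kseg0 space at firstaddr and advances
	 * firstaddr past them, e.g.
	 *
	 *	valloc(Sysmap, pt_entry_t, Sysmapsize);
	 *
	 * Note it expands to two statements, so it cannot be used as
	 * the body of an unbraced if/else.
	 */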
	/*
	 * Allocate a PTE table for the kernel.
	 * The first '256' comes from PAGER_MAP_SIZE in vm_pager_init().
	 * This should be kept in sync.
	 * We also reserve space for kmem_alloc_pageable() for vm_fork().
	 */
	Sysmapsize = (VM_KMEM_SIZE + VM_MBUF_SIZE + VM_PHYS_SIZE +
		nbuf * MAXBSIZE + 16 * NCARGS) / NBPG + 256 + 256;
	valloc(Sysmap, pt_entry_t, Sysmapsize);
#ifdef ATTR
	valloc(pmap_attributes, char, physmem);
#endif
	/*
	 * Allocate memory for pv_table.
	 * This will allocate more entries than we really need.
	 * We could do this in pmap_init when we know the actual
	 * phys_start and phys_end, but it's better to use kseg0 addresses
	 * rather than kernel virtual addresses mapped through the TLB.
	 */
	i = maxmem - pmax_btop(MACH_CACHED_TO_PHYS(firstaddr));
	valloc(pv_table, struct pv_entry, i);

	/*
	 * Clear allocated memory.
	 */
	firstaddr = pmax_round_page(firstaddr);
	bzero((caddr_t)start, firstaddr - start);

	avail_start = MACH_CACHED_TO_PHYS(firstaddr);
	avail_end = pmax_ptob(maxmem);
	mem_size = avail_end - avail_start;

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MIN_KERNEL_ADDRESS + Sysmapsize * NBPG;
	/* XXX need to decide how to set cnt.v_page_size */
	pmaxpagesperpage = 1;

	simple_lock_init(&kernel_pmap_store.pm_lock);
	kernel_pmap_store.pm_count = 1;
}

/*
 * Bootstrap memory allocator. This function allows for early dynamic
 * memory allocation until the virtual memory system has been bootstrapped.
 * After that point, either kmem_alloc or malloc should be used. This
 * function works by stealing pages from the (to be) managed page pool,
 * stealing virtual address space, then mapping the pages and zeroing them.
 *
 * It may be used from pmap_bootstrap until vm_page_startup; after that
 * it cannot be used and will panic if tried. Note that this memory will
 * never be freed, and in essence it is wired down.
 */
void *
pmap_bootstrap_alloc(size)
	int size;
{
	vm_offset_t val;
	extern boolean_t vm_page_startup_initialized;

	if (vm_page_startup_initialized)
		panic("pmap_bootstrap_alloc: called after startup initialized");

	val = MACH_PHYS_TO_CACHED(avail_start);
	size = round_page(size);
	avail_start += size;

	blkclr((caddr_t)val, size);
	return ((void *)val);
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_INIT))
		printf("pmap_init(%x, %x)\n", phys_start, phys_end);
#endif
}

/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 */
pmap_t
pmap_create(size)
	vm_size_t size;
{
	register pmap_t pmap;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_create(%x)\n", size);
#endif
	/*
	 * A software-use map does not need a pmap.
	 */
	if (size)
		return (NULL);

	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	register int i;
	int s;
	extern struct vmspace vmspace0;
	extern struct user *proc0paddr;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_pinit(%x)\n", pmap);
#endif
	simple_lock_init(&pmap->pm_lock);
	pmap->pm_count = 1;
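	/*
	 * Each user pmap gets a private segment table.  Tables are
	 * allocated a page at a time (pmaxpagesperpage * NBPG /
	 * sizeof(struct segtab) tables per page) and threaded onto
	 * free_segtab; the first word of each free table doubles as
	 * the free-list link, so no extra storage is needed.
	 */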
	if (free_segtab) {
		s = splimp();
		pmap->pm_segtab = free_segtab;
		free_segtab = *(struct segtab **)free_segtab;
		pmap->pm_segtab->seg_tab[0] = NULL;
		splx(s);
	} else {
		register struct segtab *stp;
		vm_page_t mem;

		mem = vm_page_alloc1();
		pmap_zero_page(VM_PAGE_TO_PHYS(mem));
		pmap->pm_segtab = stp = (struct segtab *)
			MACH_PHYS_TO_CACHED(VM_PAGE_TO_PHYS(mem));
		i = pmaxpagesperpage * (NBPG / sizeof(struct segtab));
		s = splimp();
		while (--i != 0) {
			stp++;
			*(struct segtab **)stp = free_segtab;
			free_segtab = stp;
		}
		splx(s);
	}
#ifdef DIAGNOSTIC
	for (i = 0; i < PMAP_SEGTABSIZE; i++)
		if (pmap->pm_segtab->seg_tab[i] != 0)
			panic("pmap_pinit: pm_segtab != 0");
#endif
	if (pmap == &vmspace0.vm_pmap) {
		/*
		 * The initial process has already been allocated a TLBPID
		 * in mach_init().
		 */
		pmap->pm_tlbpid = 1;
		pmap->pm_tlbgen = tlbpid_gen;
		proc0paddr->u_pcb.pcb_segtab = (void *)pmap->pm_segtab;
	} else {
		pmap->pm_tlbpid = 0;
		pmap->pm_tlbgen = 0;
	}
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_destroy(%x)\n", pmap);
#endif
	if (pmap == NULL)
		return;

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_CREATE))
		printf("pmap_release(%x)\n", pmap);
#endif

	if (pmap->pm_segtab) {
		register pt_entry_t *pte;
		register int i;
		int s;
#ifdef DIAGNOSTIC
		register int j;
#endif

		for (i = 0; i < PMAP_SEGTABSIZE; i++) {
			/* get pointer to segment map */
			pte = pmap->pm_segtab->seg_tab[i];
			if (!pte)
				continue;
#ifdef DIAGNOSTIC
			/* check every entry before the page is freed */
			for (j = 0; j < NPTEPG; j++) {
				if (pte[j].pt_entry)
					panic("pmap_release: segmap not empty");
			}
#endif
			vm_page_free1(
				PHYS_TO_VM_PAGE(MACH_CACHED_TO_PHYS(pte)));
			pmap->pm_segtab->seg_tab[i] = NULL;
		}
		s = splimp();
		*(struct segtab **)pmap->pm_segtab = free_segtab;
		free_segtab = pmap->pm_segtab;
		splx(s);
		pmap->pm_segtab = NULL;
	}
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_reference(%x)\n", pmap);
#endif
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	register pmap_t pmap;
	vm_offset_t sva, eva;
{
	register vm_offset_t nssva;
	register pt_entry_t *pte;
	unsigned entry;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_REMOVE|PDB_PROTECT))
		printf("pmap_remove(%x, %x, %x)\n", pmap, sva, eva);
	remove_stats.calls++;
#endif
	if (pmap == NULL)
		return;

	if (!pmap->pm_segtab) {
		register pt_entry_t *pte;

		/* remove entries from kernel pmap */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS || eva > virtual_end)
			panic("pmap_remove: kva not in range");
#endif
		pte = kvtopte(sva);
		for (; sva < eva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, sva, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry & PG_FRAME)] = 0;
#endif
			pte->pt_entry = PG_NV;
			/*
			 * Flush the TLB for the given address.
			 */
			MachTLBFlushAddr(sva);
#ifdef DEBUG
			remove_stats.flushes++;
#endif
		}
		return;
	}

#ifdef DIAGNOSTIC
	if (eva > VM_MAXUSER_ADDRESS)
		panic("pmap_remove: uva not in range");
#endif
	while (sva < eva) {
		/*
		 * nssva is the first VA of the next segment; it wraps to
		 * zero if the segment ends at the top of the address space.
		 */
		nssva = pmax_trunc_seg(sva) + NBSEG;
		if (nssva == 0 || nssva > eva)
			nssva = eva;
		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		if (!(pte = pmap_segmap(pmap, sva))) {
			sva = nssva;
			continue;
		}
		/*
		 * Invalidate every valid mapping within this segment.
		 */
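		/*
		 * pmap_segmap() (see machine/pte.h) yielded the segment's
		 * PTE page above; the low bits of the virtual page number
		 * index the entry within that page.
		 */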
		pte += (sva >> PGSHIFT) & (NPTEPG - 1);
		for (; sva < nssva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			if (entry & PG_WIRED)
				pmap->pm_stats.wired_count--;
			pmap->pm_stats.resident_count--;
			pmap_remove_pv(pmap, sva, entry & PG_FRAME);
#ifdef ATTR
			pmap_attributes[atop(entry & PG_FRAME)] = 0;
#endif
			pte->pt_entry = PG_NV;
			/*
			 * Flush the TLB for the given address.
			 */
			if (pmap->pm_tlbgen == tlbpid_gen) {
				MachTLBFlushAddr(sva | (pmap->pm_tlbpid <<
					VMMACH_TLB_PID_SHIFT));
#ifdef DEBUG
				remove_stats.flushes++;
#endif
			}
		}
	}
}

/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(pa, prot)
	vm_offset_t pa;
	vm_prot_t prot;
{
	register pv_entry_t pv;
	register vm_offset_t va;
	int s;

#ifdef DEBUG
	if ((pmapdebug & (PDB_FOLLOW|PDB_PROTECT)) ||
	    (prot == VM_PROT_NONE && (pmapdebug & PDB_REMOVE)))
		printf("pmap_page_protect(%x, %x)\n", pa, prot);
#endif
	if (!IS_VM_PHYSADDR(pa))
		return;

	switch (prot) {
	case VM_PROT_READ|VM_PROT_WRITE:
	case VM_PROT_ALL:
		break;

	/* copy_on_write */
	case VM_PROT_READ:
	case VM_PROT_READ|VM_PROT_EXECUTE:
		pv = pa_to_pvh(pa);
		s = splimp();
		/*
		 * Loop over all current mappings setting/clearing as apropos.
		 */
		if (pv->pv_pmap != NULL) {
			for (; pv; pv = pv->pv_next) {
				extern vm_offset_t pager_sva, pager_eva;

				va = pv->pv_va;

				/*
				 * XXX don't write protect pager mappings
				 */
				if (va >= pager_sva && va < pager_eva)
					continue;
				pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
					prot);
			}
		}
		splx(s);
		break;

	/* remove_all */
	default:
		pv = pa_to_pvh(pa);
		s = splimp();
		while (pv->pv_pmap != NULL) {
			pmap_remove(pv->pv_pmap, pv->pv_va,
				    pv->pv_va + PAGE_SIZE);
		}
		splx(s);
	}
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t pmap;
	vm_offset_t sva, eva;
	vm_prot_t prot;
{
	register vm_offset_t nssva;
	register pt_entry_t *pte;
	register unsigned entry;
	u_int p;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PROTECT))
		printf("pmap_protect(%x, %x, %x, %x)\n", pmap, sva, eva, prot);
#endif
	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}

	p = (prot & VM_PROT_WRITE) ? PG_RW : PG_RO;

	if (!pmap->pm_segtab) {
		/*
		 * Change entries in kernel pmap.
		 * This will trap if the page is writeable (in order to set
		 * the dirty bit) even if the dirty bit is already set. The
		 * optimization isn't worth the effort since this code isn't
		 * executed much. The common case is to make a user page
		 * read-only.
		 */
#ifdef DIAGNOSTIC
		if (sva < VM_MIN_KERNEL_ADDRESS || eva > virtual_end)
			panic("pmap_protect: kva not in range");
#endif
		pte = kvtopte(sva);
		for (; sva < eva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			entry = (entry & ~(PG_M | PG_RO)) | p;
			pte->pt_entry = entry;
			/*
			 * Update the TLB if the given address is in the cache.
			 */
			MachTLBUpdate(sva, entry);
		}
		return;
	}

#ifdef DIAGNOSTIC
	if (eva > VM_MAXUSER_ADDRESS)
		panic("pmap_protect: uva not in range");
#endif
	while (sva < eva) {
		nssva = pmax_trunc_seg(sva) + NBSEG;
		if (nssva == 0 || nssva > eva)
			nssva = eva;
		/*
		 * If VA belongs to an unallocated segment,
		 * skip to the next segment boundary.
		 */
		if (!(pte = pmap_segmap(pmap, sva))) {
			sva = nssva;
			continue;
		}
		/*
		 * Change protection on every valid mapping within this segment.
		 */
		pte += (sva >> PGSHIFT) & (NPTEPG - 1);
		for (; sva < nssva; sva += NBPG, pte++) {
			entry = pte->pt_entry;
			if (!(entry & PG_V))
				continue;
			entry = (entry & ~(PG_M | PG_RO)) | p;
			pte->pt_entry = entry;
			/*
			 * Update the TLB if the given address is in the cache.
			 */
			if (pmap->pm_tlbgen == tlbpid_gen)
				MachTLBUpdate(sva | (pmap->pm_tlbpid <<
					VMMACH_TLB_PID_SHIFT), entry);
		}
	}
}

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register u_int npte;
	register int i;
	vm_page_t mem;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_ENTER))
		printf("pmap_enter(%x, %x, %x, %x, %x)\n",
		       pmap, va, pa, prot, wired);
#endif
#ifdef DIAGNOSTIC
	if (!pmap)
		panic("pmap_enter: pmap");
	if (!pmap->pm_segtab) {
		enter_stats.kernel++;
		if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
			panic("pmap_enter: kva");
	} else {
		enter_stats.user++;
		if (va >= VM_MAXUSER_ADDRESS)
			panic("pmap_enter: uva");
	}
	if (pa & 0x80000000)
		panic("pmap_enter: pa");
	if (!(prot & VM_PROT_READ))
		panic("pmap_enter: prot");
#endif

	if (IS_VM_PHYSADDR(pa)) {
		register pv_entry_t pv, npv;
		int s;

		if (!(prot & VM_PROT_WRITE))
			npte = PG_RO;
		else {
			register vm_page_t mem;

			mem = PHYS_TO_VM_PAGE(pa);
			if ((int)va < 0) {
				/*
				 * Don't bother to trap on kernel writes
				 * (kernel VAs have the sign bit set),
				 * just record page as dirty.
				 */
				npte = PG_M;
				mem->flags &= ~PG_CLEAN;
			} else
#ifdef ATTR
				if ((pmap_attributes[atop(pa)] &
				    PMAP_ATTR_MOD) || !(mem->flags & PG_CLEAN))
#else
				if (!(mem->flags & PG_CLEAN))
#endif
					npte = PG_M;
			else
				npte = 0;
		}

#ifdef DEBUG
		enter_stats.managed++;
#endif
		/*
		 * Enter the pmap and virtual address into the
		 * physical to virtual map table.
		 */
		pv = pa_to_pvh(pa);
		s = splimp();
#ifdef DEBUG
		if (pmapdebug & PDB_ENTER)
			printf("pmap_enter: pv %x: was %x/%x/%x\n",
			       pv, pv->pv_va, pv->pv_pmap, pv->pv_next);
#endif
		if (pv->pv_pmap == NULL) {
			/*
			 * No entries yet, use header as the first entry
			 */
#ifdef DEBUG
			if (pmapdebug & PDB_PVENTRY)
				printf("pmap_enter: first pv: pmap %x va %x\n",
					pmap, va);
			enter_stats.firstpv++;
#endif
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
		} else {
			/*
			 * There is at least one other VA mapping this page.
			 * Place this entry after the header.
			 *
			 * Note: the entry may already be in the table if
			 * we are only changing the protection bits.
			 */
			for (npv = pv; npv; npv = npv->pv_next)
				if (pmap == npv->pv_pmap && va == npv->pv_va) {
#ifdef DIAGNOSTIC
					unsigned entry;

					if (!pmap->pm_segtab)
						entry = kvtopte(va)->pt_entry;
					else {
						pte = pmap_segmap(pmap, va);
						if (pte) {
							pte += (va >> PGSHIFT) &
							    (NPTEPG - 1);
							entry = pte->pt_entry;
						} else
							entry = 0;
					}
					if (!(entry & PG_V) ||
					    (entry & PG_FRAME) != pa)
						printf(
			"pmap_enter: found va %x pa %x in pv_table but != %x\n",
							va, pa, entry);
#endif
					goto fnd;
				}
#ifdef DEBUG
			if (pmapdebug & PDB_PVENTRY)
				printf("pmap_enter: new pv: pmap %x va %x\n",
					pmap, va);
#endif
			/* XXX can this cause us to recurse forever? */
			npv = (pv_entry_t)
				malloc(sizeof *npv, M_VMPVENT, M_NOWAIT);
			if (npv == NULL)
				panic("pmap_enter: no memory for pv entry");
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
#ifdef DEBUG
			if (!npv->pv_next)
				enter_stats.secondpv++;
#endif
		fnd:
			;
		}
		splx(s);
	} else {
		/*
		 * Assumption: if it is not part of our managed memory
		 * then it must be device memory which may be volatile.
		 */
#ifdef DEBUG
		enter_stats.unmanaged++;
#endif
		npte = (prot & VM_PROT_WRITE) ? (PG_M | PG_N) : (PG_RO | PG_N);
	}

	/*
	 * The only time we need to flush the cache is if we
	 * execute from a physical address and then change the data.
	 * This is the best place to do this.
	 * pmap_protect() and pmap_remove() are mostly used to switch
	 * between R/W and R/O pages.
	 * NOTE: we only support cache flush for read only text.
	 */
	if (prot == (VM_PROT_READ | VM_PROT_EXECUTE))
		MachFlushICache(MACH_PHYS_TO_CACHED(pa), PAGE_SIZE);

	if (!pmap->pm_segtab) {
		/* enter entries into kernel pmap */
		pte = kvtopte(va);
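		/*
		 * PG_G marks the entry global: the TLB ignores the PID
		 * field when matching it, so kernel mappings are visible
		 * no matter which process context is current.
		 */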
		npte |= pa | PG_V | PG_G;
		if (wired) {
			pmap->pm_stats.wired_count += pmaxpagesperpage;
			npte |= PG_WIRED;
		}
		i = pmaxpagesperpage;
		do {
			if (!(pte->pt_entry & PG_V)) {
				pmap->pm_stats.resident_count++;
			} else {
#ifdef DIAGNOSTIC
				if (pte->pt_entry & PG_WIRED)
					panic("pmap_enter: kernel wired");
#endif
			}
			/*
			 * Update the same virtual address entry.
			 */
			MachTLBUpdate(va, npte);
			pte->pt_entry = npte;
			va += NBPG;
			npte += NBPG;
			pte++;
		} while (--i != 0);
		return;
	}

	if (!(pte = pmap_segmap(pmap, va))) {
		mem = vm_page_alloc1();
		pmap_zero_page(VM_PAGE_TO_PHYS(mem));
		pmap_segmap(pmap, va) = pte = (pt_entry_t *)
			MACH_PHYS_TO_CACHED(VM_PAGE_TO_PHYS(mem));
	}
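	/*
	 * Two-level lookup: pmap_segmap() gave us the PTE page for this
	 * segment; the low bits of the virtual page number select the
	 * entry within it.
	 */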
	pte += (va >> PGSHIFT) & (NPTEPG - 1);

	/*
	 * Now validate mapping with desired protection/wiring.
	 * Assume uniform modified and referenced status for all
	 * PMAX pages in a MACH page.
	 */
	npte |= pa | PG_V;
	if (wired) {
		pmap->pm_stats.wired_count += pmaxpagesperpage;
		npte |= PG_WIRED;
	}
#ifdef DEBUG
	if (pmapdebug & PDB_ENTER) {
		printf("pmap_enter: new pte %x", npte);
		if (pmap->pm_tlbgen == tlbpid_gen)
			printf(" tlbpid %d", pmap->pm_tlbpid);
		printf("\n");
	}
#endif
	i = pmaxpagesperpage;
	do {
		pte->pt_entry = npte;
		if (pmap->pm_tlbgen == tlbpid_gen)
			MachTLBUpdate(va | (pmap->pm_tlbpid <<
				VMMACH_TLB_PID_SHIFT), npte);
		va += NBPG;
		npte += NBPG;
		pte++;
	} while (--i != 0);
}

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t	pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pt_entry_t *pte;
	u_int p;
	register int i;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_WIRING))
		printf("pmap_change_wiring(%x, %x, %x)\n", pmap, va, wired);
#endif
	if (pmap == NULL)
		return;

	p = wired ? PG_WIRED : 0;

	/*
	 * Don't need to flush the TLB since PG_WIRED is only in software.
	 */
	if (!pmap->pm_segtab) {
		/* change entries in kernel pmap */
#ifdef DIAGNOSTIC
		if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
			panic("pmap_change_wiring");
#endif
		pte = kvtopte(va);
	} else {
		if (!(pte = pmap_segmap(pmap, va)))
			return;
		pte += (va >> PGSHIFT) & (NPTEPG - 1);
	}

	i = pmaxpagesperpage;
	if (!(pte->pt_entry & PG_WIRED) && p)
		pmap->pm_stats.wired_count += i;
	else if ((pte->pt_entry & PG_WIRED) && !p)
		pmap->pm_stats.wired_count -= i;
	do {
		if (pte->pt_entry & PG_V)
			pte->pt_entry = (pte->pt_entry & ~PG_WIRED) | p;
		pte++;
	} while (--i != 0);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t	pmap;
	vm_offset_t va;
{
	register vm_offset_t pa;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract(%x, %x) -> ", pmap, va);
#endif

	if (!pmap->pm_segtab) {
#ifdef DIAGNOSTIC
		if (va < VM_MIN_KERNEL_ADDRESS || va >= virtual_end)
			panic("pmap_extract");
#endif
		pa = kvtopte(va)->pt_entry & PG_FRAME;
	} else {
		register pt_entry_t *pte;

		if (!(pte = pmap_segmap(pmap, va)))
			pa = 0;
		else {
			pte += (va >> PGSHIFT) & (NPTEPG - 1);
			pa = pte->pt_entry & PG_FRAME;
		}
	}
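	/*
	 * PG_FRAME yielded the page frame address; merge the byte
	 * offset within the page back in before returning.
	 */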
	if (pa)
		pa |= va & PGOFSET;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_extract: pa %x\n", pa);
#endif
	return (pa);
}

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t dst_pmap;
	pmap_t src_pmap;
	vm_offset_t dst_addr;
	vm_size_t len;
	vm_offset_t src_addr;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy(%x, %x, %x, %x, %x)\n",
		       dst_pmap, src_pmap, dst_addr, len, src_addr);
#endif
}

/*
 *	Require that all active physical maps contain no
 *	incorrect entries NOW.  [This update includes
 *	forcing updates of any address map caching.]
 *
 *	Generally used to ensure that a thread about
 *	to run will see a semantically correct world.
 */
void
pmap_update()
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_update()\n");
#endif
}

/*
 *	Routine:	pmap_collect
 *	Function:
 *		Garbage collects the physical map system for
 *		pages which are no longer used.
 *		Success need not be guaranteed -- that is, there
 *		may well be pages which are not referenced, but
 *		others may be collected.
 *	Usage:
 *		Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(pmap)
	pmap_t pmap;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_collect(%x)\n", pmap);
#endif
}

/*
 *	pmap_zero_page zeros the specified (machine independent)
 *	page.
 */
void
pmap_zero_page(phys)
	vm_offset_t phys;
{
	register int *p, *end;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_zero_page(%x)\n", phys);
#endif
	p = (int *)MACH_PHYS_TO_CACHED(phys);
	end = p + PAGE_SIZE / sizeof(int);
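	/* Zero through the cached kseg0 alias, four words per iteration. */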
	do {
		p[0] = 0;
		p[1] = 0;
		p[2] = 0;
		p[3] = 0;
		p += 4;
	} while (p != end);
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page.
 */
void
pmap_copy_page(src, dst)
	vm_offset_t src, dst;
{
	register int *s, *d, *end;
	register int tmp0, tmp1, tmp2, tmp3;

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_copy_page(%x, %x)\n", src, dst);
#endif
	s = (int *)MACH_PHYS_TO_CACHED(src);
	d = (int *)MACH_PHYS_TO_CACHED(dst);
	end = s + PAGE_SIZE / sizeof(int);
	do {
		tmp0 = s[0];
		tmp1 = s[1];
		tmp2 = s[2];
		tmp3 = s[3];
		d[0] = tmp0;
		d[1] = tmp1;
		d[2] = tmp2;
		d[3] = tmp3;
		s += 4;
		d += 4;
	} while (s != end);
}

/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t		pmap;
	vm_offset_t	sva, eva;
	boolean_t	pageable;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_pageable(%x, %x, %x, %x)\n",
		       pmap, sva, eva, pageable);
#endif
}

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_modify(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa)] &= ~PMAP_ATTR_MOD;
#endif
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(pa)
	vm_offset_t pa;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_clear_reference(%x)\n", pa);
#endif
#ifdef ATTR
	pmap_attributes[atop(pa)] &= ~PMAP_ATTR_REF;
#endif
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */
boolean_t
pmap_is_referenced(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa)] & PMAP_ATTR_REF);
#else
	return (FALSE);
#endif
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */
boolean_t
pmap_is_modified(pa)
	vm_offset_t pa;
{
#ifdef ATTR
	return (pmap_attributes[atop(pa)] & PMAP_ATTR_MOD);
#else
	return (FALSE);
#endif
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{

#ifdef DEBUG
	if (pmapdebug & PDB_FOLLOW)
		printf("pmap_phys_address(%x)\n", ppn);
#endif
	return (pmax_ptob(ppn));
}

/*
 * Miscellaneous support routines
 */

/*
 * Allocate a hardware PID and return it.
 * It takes almost as much or more time to search the TLB for a
 * specific PID and flush those entries as it does to flush the entire TLB.
 * Therefore, when we allocate a new PID, we just take the next number. When
 * we run out of numbers, we flush the TLB, increment the generation count
 * and start over. PID zero is reserved for kernel use.
 * This is called only by switch().
 */
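/*
 * Sketch of the invariant (illustration only): a pmap's PID is usable
 * iff pmap->pm_tlbgen == tlbpid_gen.  MachTLBFlush() plus the bump of
 * tlbpid_gen below invalidates, in one step, every PID handed out
 * before the flush, without touching the pmaps themselves.
 */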
int
pmap_alloc_tlbpid(p)
	register struct proc *p;
{
	register pmap_t pmap;
	register int id;

	pmap = &p->p_vmspace->vm_pmap;
	if (pmap->pm_tlbgen != tlbpid_gen) {
		id = tlbpid_cnt;
		if (id == VMMACH_NUM_PIDS) {
			MachTLBFlush();
			/* reserve tlbpid_gen == 0 to always mean invalid */
			if (++tlbpid_gen == 0)
				tlbpid_gen = 1;
			id = 1;
		}
		tlbpid_cnt = id + 1;
		pmap->pm_tlbpid = id;
		pmap->pm_tlbgen = tlbpid_gen;
	} else
		id = pmap->pm_tlbpid;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_TLBPID)) {
		if (curproc)
			printf("pmap_alloc_tlbpid: curproc %d '%s' ",
				curproc->p_pid, curproc->p_comm);
		else
			printf("pmap_alloc_tlbpid: curproc <none> ");
		printf("segtab %x tlbpid %d pid %d '%s'\n",
			pmap->pm_segtab, id, p->p_pid, p->p_comm);
	}
#endif
	return (id);
}

/*
 * Remove a physical to virtual address translation.
 */
void
pmap_remove_pv(pmap, va, pa)
	pmap_t pmap;
	vm_offset_t va, pa;
{
	register pv_entry_t pv, npv;
	int s;

#ifdef DEBUG
	if (pmapdebug & (PDB_FOLLOW|PDB_PVENTRY))
		printf("pmap_remove_pv(%x, %x, %x)\n", pmap, va, pa);
#endif
	/*
	 * Remove page from the PV table (raise IPL since we
	 * may be called at interrupt time).
	 */
	if (!IS_VM_PHYSADDR(pa))
		return;
	pv = pa_to_pvh(pa);
	s = splimp();
	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			free((caddr_t)npv, M_VMPVENT);
		} else
			pv->pv_pmap = NULL;
#ifdef DEBUG
		remove_stats.pvfirst++;
#endif
	} else {
		for (npv = pv->pv_next; npv; pv = npv, npv = npv->pv_next) {
#ifdef DEBUG
			remove_stats.pvsearch++;
#endif
			if (pmap == npv->pv_pmap && va == npv->pv_va)
				goto fnd;
		}
#ifdef DIAGNOSTIC
		printf("pmap_remove_pv(%x, %x, %x) not found\n", pmap, va, pa);
		panic("pmap_remove_pv");
#endif
		/* not found; nothing to remove */
		splx(s);
		return;
	fnd:
		pv->pv_next = npv->pv_next;
		free((caddr_t)npv, M_VMPVENT);
	}
	splx(s);
}

/*
 *	vm_page_alloc1:
 *
 *	Allocate and return a memory cell with no associated object.
 *	Returns NULL if the free list is empty.
 */
vm_page_t
vm_page_alloc1()
{
	register vm_page_t	mem;
	int		spl;

	spl = splimp();				/* XXX */
	simple_lock(&vm_page_queue_free_lock);
	if (queue_empty(&vm_page_queue_free)) {
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
		return (NULL);
	}

	queue_remove_first(&vm_page_queue_free, mem, vm_page_t, pageq);

	cnt.v_free_count--;
	simple_unlock(&vm_page_queue_free_lock);
	splx(spl);

	mem->flags = PG_BUSY | PG_CLEAN | PG_FAKE;
	mem->wire_count = 0;

	/*
	 *	Decide if we should poke the pageout daemon.
	 *	We do this if the free count is less than the low
	 *	water mark, or if the free count is less than the high
	 *	water mark (but above the low water mark) and the inactive
	 *	count is less than its target.
	 *
	 *	We don't have the counts locked ... if they change a little,
	 *	it doesn't really matter.
	 */

	if (cnt.v_free_count < cnt.v_free_min ||
	    (cnt.v_free_count < cnt.v_free_target &&
	     cnt.v_inactive_count < cnt.v_inactive_target))
		thread_wakeup((int)&vm_pages_needed);
	return (mem);
}

/*
 *	vm_page_free1:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page must be locked prior to entry.
 */
void
vm_page_free1(mem)
	register vm_page_t	mem;
{

	if (mem->flags & PG_ACTIVE) {
		queue_remove(&vm_page_queue_active, mem, vm_page_t, pageq);
		mem->flags &= ~PG_ACTIVE;
		cnt.v_active_count--;
	}

	if (mem->flags & PG_INACTIVE) {
		queue_remove(&vm_page_queue_inactive, mem, vm_page_t, pageq);
		mem->flags &= ~PG_INACTIVE;
		cnt.v_inactive_count--;
	}

	if (!(mem->flags & PG_FICTITIOUS)) {
		int	spl;

		spl = splimp();
		simple_lock(&vm_page_queue_free_lock);
		queue_enter(&vm_page_queue_free, mem, vm_page_t, pageq);

		cnt.v_free_count++;
		simple_unlock(&vm_page_queue_free_lock);
		splx(spl);
	}
}
1450