/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	8.4 (Berkeley) 11/14/93
 */

/*
 * Quick hack to page to dedicated partition(s).
 * TODO:
 *	Add multiprocessor locks
 *	Deal with async writes in a better fashion
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/map.h>
#include <sys/vnode.h>
#include <sys/malloc.h>

#include <miscfs/specfs/specdev.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/swap_pager.h>

#define NSWSIZES	16	/* size of swtab */
#define NPENDINGIO	64	/* max # of pending cleans */
#define MAXDADDRS	64	/* max # of disk addrs for fixed allocations */

#ifdef DEBUG
int	swpagerdebug = 0x100;
#define	SDB_FOLLOW	0x001
#define SDB_INIT	0x002
#define SDB_ALLOC	0x004
#define SDB_IO		0x008
#define SDB_WRITE	0x010
#define SDB_FAIL	0x020
#define SDB_ALLOCBLK	0x040
#define SDB_FULL	0x080
#define SDB_ANOM	0x100
#define SDB_ANOMPANIC	0x200
#endif

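/*
 * A swpagerclean entry tracks one asynchronous pageout ("clean")
 * from start to completion.  Entries begin on the swap_pager_free
 * list, move to swap_pager_inuse while the write is in flight
 * (swap_pager_io), are marked SPC_DONE by swap_pager_iodone, and are
 * reclaimed to the free list by swap_pager_clean/swap_pager_finish.
 */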
struct swpagerclean {
	queue_head_t		spc_list;
	int			spc_flags;
	struct buf		*spc_bp;
	sw_pager_t		spc_swp;
	vm_offset_t		spc_kva;
	vm_page_t		spc_m;
} swcleanlist[NPENDINGIO];
typedef struct swpagerclean *swp_clean_t;


/* spc_flags values */
#define SPC_FREE	0x00
#define SPC_BUSY	0x01
#define SPC_DONE	0x02
#define SPC_ERROR	0x04
#define SPC_DIRTY	0x08

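/*
 * swtab maps an object size to the swap block size used for that
 * object: objects of up to st_osize bytes are backed by swap blocks
 * of st_bsize DEV_BSIZE-sized sectors.  The table is filled in by
 * swap_pager_init and searched linearly by swap_pager_alloc.
 */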
struct swtab {
	vm_size_t st_osize;	/* size of object (bytes) */
	int	  st_bsize;	/* vs. size of swap block (DEV_BSIZE units) */
#ifdef DEBUG
	u_long	  st_inuse;	/* number in this range in use */
	u_long	  st_usecnt;	/* total used of this size */
#endif
} swtab[NSWSIZES+1];

#ifdef DEBUG
int		swap_pager_pendingio;	/* max pending async "clean" ops */
int		swap_pager_poip;	/* pageouts in progress */
int		swap_pager_piip;	/* pageins in progress */
#endif

queue_head_t	swap_pager_inuse;	/* list of pending page cleans */
queue_head_t	swap_pager_free;	/* list of free pager clean structs */
queue_head_t	swap_pager_list;	/* list of "named" anon regions */

static int		swap_pager_finish __P((swp_clean_t));
static void		swap_pager_init __P((void));
static vm_pager_t	swap_pager_alloc
			    __P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
static boolean_t	swap_pager_clean __P((vm_page_t, int));
static void		swap_pager_dealloc __P((vm_pager_t));
static int		swap_pager_getpage
			    __P((vm_pager_t, vm_page_t, boolean_t));
static boolean_t	swap_pager_haspage __P((vm_pager_t, vm_offset_t));
static int		swap_pager_io __P((sw_pager_t, vm_page_t, int));
static void		swap_pager_iodone __P((struct buf *));
static int		swap_pager_putpage
			    __P((vm_pager_t, vm_page_t, boolean_t));

struct pagerops swappagerops = {
	swap_pager_init,
	swap_pager_alloc,
	swap_pager_dealloc,
	swap_pager_getpage,
	swap_pager_putpage,
	swap_pager_haspage
};

static void
swap_pager_init()
{
	register swp_clean_t spc;
	register int i, bsize;
	extern int dmmin, dmmax;
	int maxbsize;

#ifdef DEBUG
	if (swpagerdebug & (SDB_FOLLOW|SDB_INIT))
		printf("swpg_init()\n");
#endif
	dfltpagerops = &swappagerops;
	queue_init(&swap_pager_list);

	/*
	 * Initialize clean lists
	 */
	queue_init(&swap_pager_inuse);
	queue_init(&swap_pager_free);
	for (i = 0, spc = swcleanlist; i < NPENDINGIO; i++, spc++) {
		queue_enter(&swap_pager_free, spc, swp_clean_t, spc_list);
		spc->spc_flags = SPC_FREE;
	}

	/*
	 * Calculate the swap allocation constants.
	 */
	if (dmmin == 0) {
		dmmin = DMMIN;
		if (dmmin < CLBYTES/DEV_BSIZE)
			dmmin = CLBYTES/DEV_BSIZE;
	}
	if (dmmax == 0)
		dmmax = DMMAX;

	/*
	 * Fill in our table of object size vs. allocation size
	 */
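	/*
	 * Illustrative numbers only: assuming 4K pages, 512-byte
	 * DEV_BSIZE sectors and MAXDADDRS of 64, the first entry is
	 * bsize = 8 sectors (one page) covering objects up to
	 * 64 * 4KB = 256KB; each subsequent entry doubles bsize and
	 * hence the object size covered (512KB, 1MB, ...), until
	 * bsize reaches maxbsize (clamped by dmmin/dmmax above).
	 */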
	bsize = btodb(PAGE_SIZE);
	if (bsize < dmmin)
		bsize = dmmin;
	maxbsize = btodb(sizeof(sw_bm_t) * NBBY * PAGE_SIZE);
	if (maxbsize > dmmax)
		maxbsize = dmmax;
	for (i = 0; i < NSWSIZES; i++) {
		swtab[i].st_osize = (vm_size_t) (MAXDADDRS * dbtob(bsize));
		swtab[i].st_bsize = bsize;
#ifdef DEBUG
		if (swpagerdebug & SDB_INIT)
			printf("swpg_init: ix %d, size %x, bsize %x\n",
			       i, swtab[i].st_osize, swtab[i].st_bsize);
#endif
		if (bsize >= maxbsize)
			break;
		bsize *= 2;
	}
	swtab[i].st_osize = 0;
	swtab[i].st_bsize = bsize;
}

/*
 * Allocate a pager structure and associated resources.
 * Note that if we are called from the pageout daemon (handle == NULL)
 * we should not wait for memory as it could result in deadlock.
 */
static vm_pager_t
swap_pager_alloc(handle, size, prot, foff)
	caddr_t handle;
	register vm_size_t size;
	vm_prot_t prot;
	vm_offset_t foff;
{
	register vm_pager_t pager;
	register sw_pager_t swp;
	struct swtab *swt;
	int waitok;

#ifdef DEBUG
	if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOC))
		printf("swpg_alloc(%x, %x, %x)\n", handle, size, prot);
#endif
	/*
	 * If this is a "named" anonymous region, look it up and
	 * return the appropriate pager if it exists.
	 */
	if (handle) {
		pager = vm_pager_lookup(&swap_pager_list, handle);
		if (pager != NULL) {
			/*
			 * Use vm_object_lookup to gain a reference
			 * to the object and also to remove from the
			 * object cache.
			 */
			if (vm_object_lookup(pager) == NULL)
				panic("swap_pager_alloc: bad object");
			return(pager);
		}
	}
	/*
	 * Pager doesn't exist, allocate swap management resources
	 * and initialize.
	 */
	waitok = handle ? M_WAITOK : M_NOWAIT;
	pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, waitok);
	if (pager == NULL)
		return(NULL);
	swp = (sw_pager_t)malloc(sizeof *swp, M_VMPGDATA, waitok);
	if (swp == NULL) {
#ifdef DEBUG
		if (swpagerdebug & SDB_FAIL)
			printf("swpg_alloc: swpager malloc failed\n");
#endif
		free((caddr_t)pager, M_VMPAGER);
		return(NULL);
	}
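	/*
	 * Size the swap block metadata.  Rough sketch of the arithmetic,
	 * assuming 4K pages and 512-byte sectors: a 1MB object rounds to
	 * 256 pages, picks the smallest swtab entry with st_osize >= 1MB,
	 * and sw_nblocks becomes the number of sw_bsize-sector swap blocks
	 * (rounded up) needed to cover btodb(size) sectors.
	 */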
	size = round_page(size);
	for (swt = swtab; swt->st_osize; swt++)
		if (size <= swt->st_osize)
			break;
#ifdef DEBUG
	swt->st_inuse++;
	swt->st_usecnt++;
#endif
	swp->sw_osize = size;
	swp->sw_bsize = swt->st_bsize;
	swp->sw_nblocks = (btodb(size) + swp->sw_bsize - 1) / swp->sw_bsize;
	swp->sw_blocks = (sw_blk_t)
		malloc(swp->sw_nblocks*sizeof(*swp->sw_blocks),
		       M_VMPGDATA, M_NOWAIT);
	if (swp->sw_blocks == NULL) {
		free((caddr_t)swp, M_VMPGDATA);
		free((caddr_t)pager, M_VMPAGER);
#ifdef DEBUG
		if (swpagerdebug & SDB_FAIL)
			printf("swpg_alloc: sw_blocks malloc failed\n");
		swt->st_inuse--;
		swt->st_usecnt--;
#endif
		return(NULL);
	}
	bzero((caddr_t)swp->sw_blocks,
	      swp->sw_nblocks * sizeof(*swp->sw_blocks));
	swp->sw_poip = 0;
	if (handle) {
		vm_object_t object;

		swp->sw_flags = SW_NAMED;
		queue_enter(&swap_pager_list, pager, vm_pager_t, pg_list);
		/*
		 * Consistent with other pagers: return with object
		 * referenced.  Can't do this with handle == NULL
		 * since it might be the pageout daemon calling.
		 */
		object = vm_object_allocate(size);
		vm_object_enter(object, pager);
		vm_object_setpager(object, pager, 0, FALSE);
	} else {
		swp->sw_flags = 0;
		queue_init(&pager->pg_list);
	}
	pager->pg_handle = handle;
	pager->pg_ops = &swappagerops;
	pager->pg_type = PG_SWAP;
	pager->pg_data = swp;

#ifdef DEBUG
	if (swpagerdebug & SDB_ALLOC)
		printf("swpg_alloc: pg_data %x, %x of %x at %x\n",
		       swp, swp->sw_nblocks, swp->sw_bsize, swp->sw_blocks);
#endif
	return(pager);
}

static void
swap_pager_dealloc(pager)
	vm_pager_t pager;
{
	register int i;
	register sw_blk_t bp;
	register sw_pager_t swp;
	struct swtab *swt;
	int s;

#ifdef DEBUG
	/* save panic time state */
	if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
		return;
	if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOC))
		printf("swpg_dealloc(%x)\n", pager);
#endif
	/*
	 * Remove from list right away so lookups will fail if we
	 * block for pageout completion.
	 */
	swp = (sw_pager_t) pager->pg_data;
	if (swp->sw_flags & SW_NAMED) {
		queue_remove(&swap_pager_list, pager, vm_pager_t, pg_list);
		swp->sw_flags &= ~SW_NAMED;
	}
#ifdef DEBUG
	for (swt = swtab; swt->st_osize; swt++)
		if (swp->sw_osize <= swt->st_osize)
			break;
	swt->st_inuse--;
#endif

	/*
	 * Wait for all pageouts to finish and remove
	 * all entries from cleaning list.
	 */
	s = splbio();
	while (swp->sw_poip) {
		swp->sw_flags |= SW_WANTED;
		assert_wait((int)swp, 0);
		thread_block();
	}
	splx(s);
	(void) swap_pager_clean(NULL, B_WRITE);

	/*
	 * Free left over swap blocks
	 */
	for (i = 0, bp = swp->sw_blocks; i < swp->sw_nblocks; i++, bp++)
		if (bp->swb_block) {
#ifdef DEBUG
			if (swpagerdebug & (SDB_ALLOCBLK|SDB_FULL))
				printf("swpg_dealloc: blk %x\n",
				       bp->swb_block);
#endif
			rmfree(swapmap, swp->sw_bsize, bp->swb_block);
		}
	/*
	 * Free swap management resources
	 */
	free((caddr_t)swp->sw_blocks, M_VMPGDATA);
	free((caddr_t)swp, M_VMPGDATA);
	free((caddr_t)pager, M_VMPAGER);
}

static int
swap_pager_getpage(pager, m, sync)
	vm_pager_t pager;
	vm_page_t m;
	boolean_t sync;
{
#ifdef DEBUG
	if (swpagerdebug & SDB_FOLLOW)
		printf("swpg_getpage(%x, %x, %d)\n", pager, m, sync);
#endif
	return(swap_pager_io((sw_pager_t)pager->pg_data, m, B_READ));
}

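/*
 * Page out the given page.  A NULL pager simply reaps completed
 * async cleans.  Synchronous requests block in swap_pager_io until
 * the write finishes; asynchronous ones return VM_PAGER_PEND and are
 * finished off later by swap_pager_iodone and swap_pager_clean.
 */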
static int
swap_pager_putpage(pager, m, sync)
	vm_pager_t pager;
	vm_page_t m;
	boolean_t sync;
{
	int flags;

#ifdef DEBUG
	if (swpagerdebug & SDB_FOLLOW)
		printf("swpg_putpage(%x, %x, %d)\n", pager, m, sync);
#endif
	if (pager == NULL) {
		(void) swap_pager_clean(NULL, B_WRITE);
		return (VM_PAGER_OK);		/* ??? */
	}
	flags = B_WRITE;
	if (!sync)
		flags |= B_ASYNC;
	return(swap_pager_io((sw_pager_t)pager->pg_data, m, flags));
}

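/*
 * Determine whether a backing swap block has been written for the
 * page at the given offset.  The offset selects a swap block
 * (offset / dbtob(sw_bsize)) and a page-sized bit within that
 * block's swb_mask.  For example, with 4K pages and 32-sector
 * (16KB) swap blocks, offset 0x5000 maps to block index 1, bit 1.
 */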
static boolean_t
swap_pager_haspage(pager, offset)
	vm_pager_t pager;
	vm_offset_t offset;
{
	register sw_pager_t swp;
	register sw_blk_t swb;
	int ix;

#ifdef DEBUG
	if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOCBLK))
		printf("swpg_haspage(%x, %x) ", pager, offset);
#endif
	swp = (sw_pager_t) pager->pg_data;
	ix = offset / dbtob(swp->sw_bsize);
	if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) {
#ifdef DEBUG
		if (swpagerdebug & (SDB_FAIL|SDB_FOLLOW|SDB_ALLOCBLK))
			printf("swpg_haspage: %x bad offset %x, ix %x\n",
			       swp->sw_blocks, offset, ix);
#endif
		return(FALSE);
	}
	swb = &swp->sw_blocks[ix];
	if (swb->swb_block)
		ix = atop(offset % dbtob(swp->sw_bsize));
#ifdef DEBUG
	if (swpagerdebug & SDB_ALLOCBLK)
		printf("%x blk %x+%x ", swp->sw_blocks, swb->swb_block, ix);
	if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOCBLK))
		printf("-> %c\n",
		       "FT"[swb->swb_block && (swb->swb_mask & (1 << ix))]);
#endif
	if (swb->swb_block && (swb->swb_mask & (1 << ix)))
		return(TRUE);
	return(FALSE);
}

/*
 * Scaled down version of swap().
 * Assumes that PAGE_SIZE < MAXPHYS; i.e. only one operation needed.
 * BOGUS:  lower level IO routines expect a KVA so we have to map our
 * provided physical page into the KVA to keep them happy.
 */
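/*
 * Overall flow (summary of the code below): look up the swap block
 * for the page and fail a read if nothing has been written there;
 * reap completed cleans (an async write also fails here if the page
 * is already being cleaned or no clean entries are free); allocate
 * a swap block on first write; map the page into kernel VA, grab a
 * buf header from bswlist and issue VOP_STRATEGY.  Sync requests
 * then sleep until B_DONE; async writes return VM_PAGER_PEND with a
 * swpagerclean entry queued for swap_pager_iodone.
 */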
static int
swap_pager_io(swp, m, flags)
	register sw_pager_t swp;
	vm_page_t m;
	int flags;
{
	register struct buf *bp;
	register sw_blk_t swb;
	register int s;
	int ix;
	boolean_t rv;
	vm_offset_t kva, off;
	swp_clean_t spc;

#ifdef DEBUG
	/* save panic time state */
	if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
		return (VM_PAGER_FAIL);		/* XXX: correct return? */
	if (swpagerdebug & (SDB_FOLLOW|SDB_IO))
		printf("swpg_io(%x, %x, %x)\n", swp, m, flags);
	if ((flags & (B_READ|B_ASYNC)) == (B_READ|B_ASYNC))
		panic("swap_pager_io: cannot do ASYNC reads");
#endif

	/*
	 * First determine if the page exists in the pager if this is
	 * a sync read.  This quickly handles cases where we are
	 * following shadow chains looking for the top level object
	 * with the page.
	 */
	off = m->offset + m->object->paging_offset;
	ix = off / dbtob(swp->sw_bsize);
	if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks)
		return(VM_PAGER_FAIL);
	swb = &swp->sw_blocks[ix];
	off = off % dbtob(swp->sw_bsize);
	if ((flags & B_READ) &&
	    (swb->swb_block == 0 || (swb->swb_mask & (1 << atop(off))) == 0))
		return(VM_PAGER_FAIL);

	/*
	 * For reads (pageins) and synchronous writes, we clean up
	 * all completed async pageouts.
	 */
	if ((flags & B_ASYNC) == 0) {
		s = splbio();
#ifdef DEBUG
		/*
		 * Check to see if this page is currently being cleaned.
		 * If it is, we just wait until the operation is done before
		 * continuing.
		 */
		while (swap_pager_clean(m, flags&B_READ)) {
			if (swpagerdebug & SDB_ANOM)
				printf("swap_pager_io: page %x cleaning\n", m);

			swp->sw_flags |= SW_WANTED;
			assert_wait((int)swp, 0);
			thread_block();
		}
#else
		(void) swap_pager_clean(m, flags&B_READ);
#endif
		splx(s);
	}
	/*
	 * For async writes (pageouts), we clean up completed pageouts so
	 * that all available resources are freed.  This also tells us if
	 * this page is already being cleaned.  If it is, or if no
	 * resources are available, we try again later.
	 */
	else if (swap_pager_clean(m, B_WRITE) ||
		 queue_empty(&swap_pager_free)) {
#ifdef DEBUG
		if ((swpagerdebug & SDB_ANOM) &&
		    !queue_empty(&swap_pager_free))
			printf("swap_pager_io: page %x already cleaning\n", m);
#endif
		return(VM_PAGER_FAIL);
	}

	/*
	 * Allocate a swap block if necessary.
	 */
	if (swb->swb_block == 0) {
		swb->swb_block = rmalloc(swapmap, swp->sw_bsize);
		if (swb->swb_block == 0) {
#ifdef DEBUG
			if (swpagerdebug & SDB_FAIL)
				printf("swpg_io: rmalloc of %x failed\n",
				       swp->sw_bsize);
#endif
			return(VM_PAGER_FAIL);
		}
#ifdef DEBUG
		if (swpagerdebug & (SDB_FULL|SDB_ALLOCBLK))
			printf("swpg_io: %x alloc blk %x at ix %x\n",
			       swp->sw_blocks, swb->swb_block, ix);
#endif
	}

	/*
	 * Allocate a kernel virtual address and initialize so that PTE
	 * is available for lower level IO drivers.
	 */
	kva = vm_pager_map_page(m);

	/*
	 * Get a swap buffer header and perform the IO
	 */
	s = splbio();
	while (bswlist.b_actf == NULL) {
#ifdef DEBUG
		if (swpagerdebug & SDB_ANOM)
			printf("swap_pager_io: wait on swbuf for %x (%d)\n",
			       m, flags);
#endif
		bswlist.b_flags |= B_WANTED;
		tsleep((caddr_t)&bswlist, PSWP+1, "swpgio", 0);
	}
	bp = bswlist.b_actf;
	bswlist.b_actf = bp->b_actf;
	splx(s);
	bp->b_flags = B_BUSY | (flags & B_READ);
	bp->b_proc = &proc0;	/* XXX (but without B_PHYS set this is ok) */
	bp->b_data = (caddr_t)kva;
	bp->b_blkno = swb->swb_block + btodb(off);
	VHOLD(swapdev_vp);
	bp->b_vp = swapdev_vp;
	if (swapdev_vp->v_type == VBLK)
		bp->b_dev = swapdev_vp->v_rdev;
	bp->b_bcount = PAGE_SIZE;
	if ((bp->b_flags & B_READ) == 0) {
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = PAGE_SIZE;
		swapdev_vp->v_numoutput++;
	}

	/*
	 * If this is an async write we set up additional buffer fields
	 * and place a "cleaning" entry on the inuse queue.
	 */
	if ((flags & (B_READ|B_ASYNC)) == B_ASYNC) {
#ifdef DEBUG
		if (queue_empty(&swap_pager_free))
			panic("swpg_io: lost spc");
#endif
		queue_remove_first(&swap_pager_free,
				   spc, swp_clean_t, spc_list);
#ifdef DEBUG
		if (spc->spc_flags != SPC_FREE)
			panic("swpg_io: bad free spc");
#endif
		spc->spc_flags = SPC_BUSY;
		spc->spc_bp = bp;
		spc->spc_swp = swp;
		spc->spc_kva = kva;
		spc->spc_m = m;
		bp->b_flags |= B_CALL;
		bp->b_iodone = swap_pager_iodone;
		s = splbio();
		swp->sw_poip++;
		queue_enter(&swap_pager_inuse, spc, swp_clean_t, spc_list);

#ifdef DEBUG
		swap_pager_poip++;
		if (swpagerdebug & SDB_WRITE)
			printf("swpg_io: write: bp=%x swp=%x spc=%x poip=%d\n",
			       bp, swp, spc, swp->sw_poip);
		if ((swpagerdebug & SDB_ALLOCBLK) &&
		    (swb->swb_mask & (1 << atop(off))) == 0)
			printf("swpg_io: %x write blk %x+%x\n",
			       swp->sw_blocks, swb->swb_block, atop(off));
#endif
		swb->swb_mask |= (1 << atop(off));
		splx(s);
	}
#ifdef DEBUG
	if (swpagerdebug & SDB_IO)
		printf("swpg_io: IO start: bp %x, db %x, va %x, pa %x\n",
		       bp, swb->swb_block+btodb(off), kva, VM_PAGE_TO_PHYS(m));
#endif
	VOP_STRATEGY(bp);
	if ((flags & (B_READ|B_ASYNC)) == B_ASYNC) {
#ifdef DEBUG
		if (swpagerdebug & SDB_IO)
			printf("swpg_io:  IO started: bp %x\n", bp);
#endif
		return(VM_PAGER_PEND);
	}
	s = splbio();
#ifdef DEBUG
	if (flags & B_READ)
		swap_pager_piip++;
	else
		swap_pager_poip++;
#endif
	while ((bp->b_flags & B_DONE) == 0) {
		assert_wait((int)bp, 0);
		thread_block();
	}
#ifdef DEBUG
	if (flags & B_READ)
		--swap_pager_piip;
	else
		--swap_pager_poip;
#endif
	rv = (bp->b_flags & B_ERROR) ? VM_PAGER_ERROR : VM_PAGER_OK;
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
	bp->b_actf = bswlist.b_actf;
	bswlist.b_actf = bp;
	if (bp->b_vp)
		brelvp(bp);
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		thread_wakeup((int)&bswlist);
	}
	if ((flags & B_READ) == 0 && rv == VM_PAGER_OK) {
		m->flags |= PG_CLEAN;
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	}
	splx(s);
#ifdef DEBUG
	if (swpagerdebug & SDB_IO)
		printf("swpg_io:  IO done: bp %x, rv %d\n", bp, rv);
	if ((swpagerdebug & SDB_FAIL) && rv == VM_PAGER_ERROR)
		printf("swpg_io: IO error\n");
#endif
	vm_pager_unmap_page(kva);
	return(rv);
}

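/*
 * Reap completed async cleans: each SPC_DONE entry on the inuse list
 * is finished via swap_pager_finish, unmapped and returned to the
 * free list.  Returns TRUE if the given page (if any) was found to be
 * undergoing a clean, which callers use to defer or retry the I/O.
 */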
static boolean_t
swap_pager_clean(m, rw)
	vm_page_t m;
	int rw;
{
	register swp_clean_t spc, tspc;
	register int s;

#ifdef DEBUG
	/* save panic time state */
	if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
		return (FALSE);			/* ??? */
	if (swpagerdebug & SDB_FOLLOW)
		printf("swpg_clean(%x, %d)\n", m, rw);
#endif
	tspc = NULL;
	for (;;) {
		/*
		 * Lookup and removal from the inuse list must be done
		 * at splbio() to avoid conflicts with swap_pager_iodone.
		 */
		s = splbio();
		spc = (swp_clean_t) queue_first(&swap_pager_inuse);
		while (!queue_end(&swap_pager_inuse, (queue_entry_t)spc)) {
			if ((spc->spc_flags & SPC_DONE) &&
			    swap_pager_finish(spc)) {
				queue_remove(&swap_pager_inuse, spc,
					     swp_clean_t, spc_list);
				break;
			}
			if (m && m == spc->spc_m) {
#ifdef DEBUG
				if (swpagerdebug & SDB_ANOM)
					printf("swap_pager_clean: page %x on list, flags %x\n",
					       m, spc->spc_flags);
#endif
				tspc = spc;
			}
			spc = (swp_clean_t) queue_next(&spc->spc_list);
		}

		/*
		 * No operations done; that's all we can do for now.
		 */
		if (queue_end(&swap_pager_inuse, (queue_entry_t)spc))
			break;
		splx(s);

		/*
		 * The desired page was found to be busy earlier in
		 * the scan but has since completed.
		 */
		if (tspc && tspc == spc) {
#ifdef DEBUG
			if (swpagerdebug & SDB_ANOM)
				printf("swap_pager_clean: page %x done while looking\n",
				       m);
#endif
			tspc = NULL;
		}
		spc->spc_flags = SPC_FREE;
		vm_pager_unmap_page(spc->spc_kva);
		queue_enter(&swap_pager_free, spc, swp_clean_t, spc_list);
#ifdef DEBUG
		if (swpagerdebug & SDB_WRITE)
			printf("swpg_clean: free spc %x\n", spc);
#endif
	}
#ifdef DEBUG
	/*
	 * If we found that the desired page is already being cleaned
	 * mark it so that swap_pager_iodone() will not set the clean
	 * flag before the pageout daemon has another chance to clean it.
	 */
	if (tspc && rw == B_WRITE) {
		if (swpagerdebug & SDB_ANOM)
			printf("swap_pager_clean: page %x on clean list\n",
			       tspc);
		tspc->spc_flags |= SPC_DIRTY;
	}
#endif
	splx(s);

#ifdef DEBUG
	if (swpagerdebug & SDB_WRITE)
		printf("swpg_clean: return %d\n", tspc ? TRUE : FALSE);
	if ((swpagerdebug & SDB_ANOM) && tspc)
		printf("swpg_clean: %s of cleaning page %x\n",
		       rw == B_READ ? "get" : "put", m);
#endif
	return(tspc ? TRUE : FALSE);
}

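/*
 * Complete one pageout: drop the object's paging_in_progress count,
 * mark the page clean (or dirty again on error) and wake anyone
 * waiting on it.  Returns 0 if the object lock could not be taken,
 * in which case the caller leaves the entry for a later pass.
 */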
static int
swap_pager_finish(spc)
	register swp_clean_t spc;
{
	vm_object_t object = spc->spc_m->object;

	/*
	 * Mark the paging operation as done.
	 * (XXX) If we cannot get the lock, leave it until later.
	 * (XXX) Also we are assuming that an async write is a
	 *       pageout operation that has incremented the counter.
	 */
	if (!vm_object_lock_try(object))
		return(0);

	if (--object->paging_in_progress == 0)
		thread_wakeup((int) object);

#ifdef DEBUG
	/*
	 * XXX: this isn't even close to the right thing to do,
	 * introduces a variety of race conditions.
	 *
	 * If dirty, vm_pageout() has attempted to clean the page
	 * again.  In this case we do not do anything as we will
	 * see the page again shortly.
	 */
	if (spc->spc_flags & SPC_DIRTY) {
		if (swpagerdebug & SDB_ANOM)
			printf("swap_pager_finish: page %x dirty again\n",
			       spc->spc_m);
		spc->spc_m->flags &= ~PG_BUSY;
		PAGE_WAKEUP(spc->spc_m);
		vm_object_unlock(object);
		return(1);
	}
#endif
	/*
	 * If no error, mark as clean and inform the pmap system.
	 * If error, mark as dirty so we will try again.
	 * (XXX could get stuck doing this, should give up after a while)
	 */
	if (spc->spc_flags & SPC_ERROR) {
		printf("swap_pager_finish: clean of page %x failed\n",
		       VM_PAGE_TO_PHYS(spc->spc_m));
		spc->spc_m->flags |= PG_LAUNDRY;
	} else {
		spc->spc_m->flags |= PG_CLEAN;
		pmap_clear_modify(VM_PAGE_TO_PHYS(spc->spc_m));
	}
	spc->spc_m->flags &= ~PG_BUSY;
	PAGE_WAKEUP(spc->spc_m);

	vm_object_unlock(object);
	return(1);
}

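/*
 * Called via biodone when an async swap write finishes: mark the
 * matching clean entry SPC_DONE (SPC_ERROR on failure), return the
 * buf header to bswlist, and wake up anyone waiting on the pager or
 * the buf list.  The entry itself is reclaimed later by
 * swap_pager_clean.
 */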
static void
swap_pager_iodone(bp)
	register struct buf *bp;
{
	register swp_clean_t spc;
	daddr_t blk;
	int s;

#ifdef DEBUG
	/* save panic time state */
	if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
		return;
	if (swpagerdebug & SDB_FOLLOW)
		printf("swpg_iodone(%x)\n", bp);
#endif
	s = splbio();
	spc = (swp_clean_t) queue_first(&swap_pager_inuse);
	while (!queue_end(&swap_pager_inuse, (queue_entry_t)spc)) {
		if (spc->spc_bp == bp)
			break;
		spc = (swp_clean_t) queue_next(&spc->spc_list);
	}
#ifdef DEBUG
	if (queue_end(&swap_pager_inuse, (queue_entry_t)spc))
		panic("swap_pager_iodone: bp not found");
#endif

	spc->spc_flags &= ~SPC_BUSY;
	spc->spc_flags |= SPC_DONE;
	if (bp->b_flags & B_ERROR)
		spc->spc_flags |= SPC_ERROR;
	spc->spc_bp = NULL;
	blk = bp->b_blkno;

#ifdef DEBUG
	--swap_pager_poip;
	if (swpagerdebug & SDB_WRITE)
		printf("swpg_iodone: bp=%x swp=%x flags=%x spc=%x poip=%x\n",
		       bp, spc->spc_swp, spc->spc_swp->sw_flags,
		       spc, spc->spc_swp->sw_poip);
#endif

	spc->spc_swp->sw_poip--;
	if (spc->spc_swp->sw_flags & SW_WANTED) {
		spc->spc_swp->sw_flags &= ~SW_WANTED;
		thread_wakeup((int)spc->spc_swp);
	}

	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
	bp->b_actf = bswlist.b_actf;
	bswlist.b_actf = bp;
	if (bp->b_vp)
		brelvp(bp);
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		thread_wakeup((int)&bswlist);
	}
	/*
	 * Only kick the pageout daemon if we are really hurting
	 * for pages; otherwise this page will be picked up later.
	 */
	if (cnt.v_free_count < cnt.v_free_min)
		thread_wakeup((int) &vm_pages_needed);
	splx(s);
}