/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 * from: Utah $Hdr: swap_pager.c 1.4 91/04/30$
 *
 *	@(#)swap_pager.c	7.4 (Berkeley) 05/07/91
 */

/*
 * Quick hack to page to dedicated partition(s).
 * TODO:
 *	Add multiprocessor locks
 *	Deal with async writes in a better fashion
 */

#include "swappager.h"
#if NSWAPPAGER > 0

#include "param.h"
#include "proc.h"
#include "buf.h"
#include "map.h"
#include "systm.h"
#include "specdev.h"
#include "vnode.h"
#include "malloc.h"
#include "queue.h"

#include "vm_param.h"
#include "queue.h"
#include "lock.h"
#include "vm_prot.h"
#include "vm_object.h"
#include "vm_page.h"
#include "vm_pageout.h"
#include "swap_pager.h"

#define NSWSIZES	16	/* size of swtab */
#define NPENDINGIO	64	/* max # of pending cleans */
#define MAXDADDRS	64	/* max # of disk addrs for fixed allocations */

#ifdef DEBUG
int	swpagerdebug = 0x100;
#define	SDB_FOLLOW	0x001
#define SDB_INIT	0x002
#define SDB_ALLOC	0x004
#define SDB_IO		0x008
#define SDB_WRITE	0x010
#define SDB_FAIL	0x020
#define SDB_ALLOCBLK	0x040
#define SDB_FULL	0x080
#define SDB_ANOM	0x100
#define SDB_ANOMPANIC	0x200
#endif

struct swpagerclean {
	queue_head_t		spc_list;
	int			spc_flags;
	struct buf		*spc_bp;
	sw_pager_t		spc_swp;
	vm_offset_t		spc_kva;
	vm_page_t		spc_m;
} swcleanlist[NPENDINGIO];
typedef	struct swpagerclean	*swp_clean_t;

/* spc_flags values */
#define SPC_FREE	0x00
#define SPC_BUSY	0x01
#define SPC_DONE	0x02
#define SPC_ERROR	0x04
#define SPC_DIRTY	0x08

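/*
 * Table mapping object size ranges to the swap block size (sw_bsize)
 * used for objects in that range.  swap_pager_init() fills it in with
 * geometrically increasing block sizes; swap_pager_alloc() scans it to
 * pick the block size for a new pager.
 */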
struct swtab {
	vm_size_t st_osize;	/* size of object (bytes) */
	int	  st_bsize;	/* vs. size of swap block (DEV_BSIZE units) */
#ifdef DEBUG
	u_long	  st_inuse;	/* number in this range in use */
	u_long	  st_usecnt;	/* total used of this size */
#endif
} swtab[NSWSIZES+1];

#ifdef DEBUG
int		swap_pager_pendingio;	/* max pending async "clean" ops */
int		swap_pager_poip;	/* pageouts in progress */
int		swap_pager_piip;	/* pageins in progress */
#endif

queue_head_t	swap_pager_inuse;	/* list of pending page cleans */
queue_head_t	swap_pager_free;	/* list of free pager clean structs */
queue_head_t	swap_pager_list;	/* list of "named" anon regions */

void
swap_pager_init()
{
	register swp_clean_t spc;
	register int i, bsize;
	extern int dmmin, dmmax;
	int maxbsize;

#ifdef DEBUG
	if (swpagerdebug & (SDB_FOLLOW|SDB_INIT))
		printf("swpg_init()\n");
#endif
	dfltpagerops = &swappagerops;
	queue_init(&swap_pager_list);

	/*
	 * Initialize clean lists
	 */
	queue_init(&swap_pager_inuse);
	queue_init(&swap_pager_free);
	for (i = 0, spc = swcleanlist; i < NPENDINGIO; i++, spc++) {
		queue_enter(&swap_pager_free, spc, swp_clean_t, spc_list);
		spc->spc_flags = SPC_FREE;
	}

	/*
	 * Calculate the swap allocation constants.
	 */
	if (dmmin == 0) {
		dmmin = DMMIN;
		if (dmmin < CLBYTES/DEV_BSIZE)
			dmmin = CLBYTES/DEV_BSIZE;
	}
	if (dmmax == 0)
		dmmax = DMMAX;

	/*
	 * Fill in our table of object size vs. allocation size
	 */
	bsize = btodb(PAGE_SIZE);
	if (bsize < dmmin)
		bsize = dmmin;
	maxbsize = btodb(sizeof(sw_bm_t) * NBBY * PAGE_SIZE);
	if (maxbsize > dmmax)
		maxbsize = dmmax;
	for (i = 0; i < NSWSIZES; i++) {
		swtab[i].st_osize = (vm_size_t) (MAXDADDRS * dbtob(bsize));
		swtab[i].st_bsize = bsize;
#ifdef DEBUG
		if (swpagerdebug & SDB_INIT)
			printf("swpg_init: ix %d, size %x, bsize %x\n",
			       i, swtab[i].st_osize, swtab[i].st_bsize);
#endif
		if (bsize >= maxbsize)
			break;
		bsize *= 2;
	}
	swtab[i].st_osize = 0;
	swtab[i].st_bsize = bsize;
}

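/*
 * For illustration only (assuming a 4K page, 512-byte DEV_BSIZE and
 * default dmmin/dmmax values that do not clamp the range): the loop
 * above starts with bsize = 8 disk blocks (one page), so swtab[0]
 * covers objects up to 64 * 4KB = 256KB using 4KB swap blocks,
 * swtab[1] covers up to 512KB using 8KB blocks, and so on, doubling
 * until maxbsize is reached.
 */
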
/*
 * Allocate a pager structure and associated resources.
 * Note that if we are called from the pageout daemon (handle == NULL)
 * we should not wait for memory as it could result in deadlock.
 */
vm_pager_t
swap_pager_alloc(handle, size, prot)
	caddr_t handle;
	register vm_size_t size;
	vm_prot_t prot;
{
	register vm_pager_t pager;
	register sw_pager_t swp;
	struct swtab *swt;
	int waitok;

#ifdef DEBUG
	if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOC))
		printf("swpg_alloc(%x, %x, %x)\n", handle, size, prot);
#endif
	/*
	 * If this is a "named" anonymous region, look it up and
	 * return the appropriate pager if it exists.
	 */
	if (handle) {
		pager = vm_pager_lookup(&swap_pager_list, handle);
		if (pager != NULL) {
			/*
			 * Use vm_object_lookup to gain a reference
			 * to the object and also to remove it from the
			 * object cache.
			 */
			if (vm_object_lookup(pager) == NULL)
				panic("swap_pager_alloc: bad object");
			return(pager);
		}
	}
	/*
	 * Pager doesn't exist, allocate swap management resources
	 * and initialize.
	 */
	waitok = handle ? M_WAITOK : M_NOWAIT;
	pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, waitok);
	if (pager == NULL)
		return(NULL);
	swp = (sw_pager_t)malloc(sizeof *swp, M_VMPGDATA, waitok);
	if (swp == NULL) {
#ifdef DEBUG
		if (swpagerdebug & SDB_FAIL)
			printf("swpg_alloc: swpager malloc failed\n");
#endif
		free((caddr_t)pager, M_VMPAGER);
		return(NULL);
	}
	size = round_page(size);
	for (swt = swtab; swt->st_osize; swt++)
		if (size <= swt->st_osize)
			break;
#ifdef DEBUG
	swt->st_inuse++;
	swt->st_usecnt++;
#endif
	swp->sw_osize = size;
	swp->sw_bsize = swt->st_bsize;
	swp->sw_nblocks = (btodb(size) + swp->sw_bsize - 1) / swp->sw_bsize;
	swp->sw_blocks = (sw_blk_t)
		malloc(swp->sw_nblocks*sizeof(*swp->sw_blocks),
		       M_VMPGDATA, M_NOWAIT);
	if (swp->sw_blocks == NULL) {
		free((caddr_t)swp, M_VMPGDATA);
		free((caddr_t)pager, M_VMPAGER);
#ifdef DEBUG
		if (swpagerdebug & SDB_FAIL)
			printf("swpg_alloc: sw_blocks malloc failed\n");
		swt->st_inuse--;
		swt->st_usecnt--;
#endif
		return(FALSE);
	}
	bzero((caddr_t)swp->sw_blocks,
	      swp->sw_nblocks * sizeof(*swp->sw_blocks));
	swp->sw_poip = 0;
	if (handle) {
		vm_object_t object;

		swp->sw_flags = SW_NAMED;
		queue_enter(&swap_pager_list, pager, vm_pager_t, pg_list);
		/*
		 * Consistent with other pagers: return with object
		 * referenced.  Can't do this with handle == NULL
		 * since it might be the pageout daemon calling.
		 */
		object = vm_object_allocate(size);
		vm_object_enter(object, pager);
		vm_object_setpager(object, pager, 0, FALSE);
	} else {
		swp->sw_flags = 0;
		queue_init(&pager->pg_list);
	}
	pager->pg_handle = handle;
	pager->pg_ops = &swappagerops;
	pager->pg_type = PG_SWAP;
	pager->pg_data = (caddr_t)swp;

#ifdef DEBUG
	if (swpagerdebug & SDB_ALLOC)
		printf("swpg_alloc: pg_data %x, %x of %x at %x\n",
		       swp, swp->sw_nblocks, swp->sw_bsize, swp->sw_blocks);
#endif
	return(pager);
}

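/*
 * Free a pager and its swap resources: wait for any pageouts in
 * progress to complete, return leftover swap blocks to the swap map,
 * and release the block array and pager structures.
 */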
void
swap_pager_dealloc(pager)
	vm_pager_t pager;
{
	register int i;
	register sw_blk_t bp;
	register sw_pager_t swp;
	struct swtab *swt;
	int s;

#ifdef DEBUG
	/* save panic time state */
	if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
		return;
	if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOC))
		printf("swpg_dealloc(%x)\n", pager);
#endif
	/*
	 * Remove from the list right away so lookups will fail if we
	 * block for pageout completion.
	 */
	swp = (sw_pager_t) pager->pg_data;
	if (swp->sw_flags & SW_NAMED) {
		queue_remove(&swap_pager_list, pager, vm_pager_t, pg_list);
		swp->sw_flags &= ~SW_NAMED;
	}
#ifdef DEBUG
	for (swt = swtab; swt->st_osize; swt++)
		if (swp->sw_osize <= swt->st_osize)
			break;
	swt->st_inuse--;
#endif

	/*
	 * Wait for all pageouts to finish and remove
	 * all entries from the cleaning list.
	 */
	s = splbio();
	while (swp->sw_poip) {
		swp->sw_flags |= SW_WANTED;
		assert_wait((int)swp);
		thread_block();
	}
	splx(s);
	(void) swap_pager_clean(NULL, B_WRITE);

	/*
	 * Free leftover swap blocks
	 */
	for (i = 0, bp = swp->sw_blocks; i < swp->sw_nblocks; i++, bp++)
		if (bp->swb_block) {
#ifdef DEBUG
			if (swpagerdebug & (SDB_ALLOCBLK|SDB_FULL))
				printf("swpg_dealloc: blk %x\n",
				       bp->swb_block);
#endif
			rmfree(swapmap, swp->sw_bsize, bp->swb_block);
		}
	/*
	 * Free swap management resources
	 */
	free((caddr_t)swp->sw_blocks, M_VMPGDATA);
	free((caddr_t)swp, M_VMPGDATA);
	free((caddr_t)pager, M_VMPAGER);
}

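/*
 * Page in a single page from swap; simply a synchronous read through
 * swap_pager_io().
 */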
swap_pager_getpage(pager, m, sync)
	vm_pager_t pager;
	vm_page_t m;
	boolean_t sync;
{
#ifdef DEBUG
	if (swpagerdebug & SDB_FOLLOW)
		printf("swpg_getpage(%x, %x, %d)\n", pager, m, sync);
#endif
	return(swap_pager_io((sw_pager_t)pager->pg_data, m, B_READ));
}

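/*
 * Page out a single page.  A NULL pager is simply a request to reap
 * any completed async cleans; otherwise the page is written through
 * swap_pager_io(), asynchronously unless sync is set.
 */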
swap_pager_putpage(pager, m, sync)
	vm_pager_t pager;
	vm_page_t m;
	boolean_t sync;
{
	int flags;

#ifdef DEBUG
	if (swpagerdebug & SDB_FOLLOW)
		printf("swpg_putpage(%x, %x, %d)\n", pager, m, sync);
#endif
	if (pager == NULL) {
		(void) swap_pager_clean(NULL, B_WRITE);
		return;
	}
	flags = B_WRITE;
	if (!sync)
		flags |= B_ASYNC;
	return(swap_pager_io((sw_pager_t)pager->pg_data, m, flags));
}

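/*
 * Return TRUE if the page at the given offset has been written to swap,
 * i.e. if its bit is set in the swb_mask of the swap block covering
 * that offset.
 */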
boolean_t
swap_pager_haspage(pager, offset)
	vm_pager_t pager;
	vm_offset_t offset;
{
	register sw_pager_t swp;
	register sw_blk_t swb;
	int ix;

#ifdef DEBUG
	if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOCBLK))
		printf("swpg_haspage(%x, %x) ", pager, offset);
#endif
	swp = (sw_pager_t) pager->pg_data;
	ix = offset / dbtob(swp->sw_bsize);
	if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) {
#ifdef DEBUG
		if (swpagerdebug & (SDB_FAIL|SDB_FOLLOW|SDB_ALLOCBLK))
			printf("swpg_haspage: %x bad offset %x, ix %x\n",
			       swp->sw_blocks, offset, ix);
#endif
		return(FALSE);
	}
	swb = &swp->sw_blocks[ix];
	if (swb->swb_block)
		ix = atop(offset % dbtob(swp->sw_bsize));
#ifdef DEBUG
	if (swpagerdebug & SDB_ALLOCBLK)
		printf("%x blk %x+%x ", swp->sw_blocks, swb->swb_block, ix);
	if (swpagerdebug & (SDB_FOLLOW|SDB_ALLOCBLK))
		printf("-> %c\n",
		       "FT"[swb->swb_block && (swb->swb_mask & (1 << ix))]);
#endif
	if (swb->swb_block && (swb->swb_mask & (1 << ix)))
		return(TRUE);
	return(FALSE);
}

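/*
 * Note on addressing: a pager's swap space is addressed in two steps.
 * An object offset selects a sw_blk_t (offset / dbtob(sw_bsize)) and
 * the page's position within that block selects a bit in swb_mask
 * (atop() of the remainder); the bit records whether that page has
 * ever been written to the block.
 */
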
/*
 * Scaled down version of swap().
 * Assumes that PAGE_SIZE < MAXPHYS; i.e. only one operation needed.
 * BOGUS: lower level IO routines expect a KVA so we have to map our
 * provided physical page into the KVA to keep them happy.
 */
swap_pager_io(swp, m, flags)
	register sw_pager_t swp;
	vm_page_t m;
	int flags;
{
	register struct buf *bp;
	register sw_blk_t swb;
	register int s;
	int ix;
	boolean_t rv;
	vm_offset_t kva, off;
	swp_clean_t spc;

#ifdef DEBUG
	/* save panic time state */
	if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
		return;
	if (swpagerdebug & (SDB_FOLLOW|SDB_IO))
		printf("swpg_io(%x, %x, %x)\n", swp, m, flags);
#endif

	/*
	 * For reads (pageins) and synchronous writes, we clean up
	 * all completed async pageouts.
	 */
	if ((flags & B_ASYNC) == 0) {
		s = splbio();
#ifdef DEBUG
		/*
		 * Check to see if this page is currently being cleaned.
		 * If it is, we just wait until the operation is done
		 * before continuing.
		 */
		while (swap_pager_clean(m, flags&B_READ)) {
			if (swpagerdebug & SDB_ANOM)
				printf("swap_pager_io: page %x cleaning\n", m);

			swp->sw_flags |= SW_WANTED;
			assert_wait((int)swp);
			thread_block();
		}
#else
		(void) swap_pager_clean(m, flags&B_READ);
#endif
		splx(s);
	}
	/*
	 * For async writes (pageouts), we clean up completed pageouts so
	 * that all available resources are freed.  This also tells us if
	 * this page is already being cleaned.  If it is, or no resources
	 * are available, we try again later.
	 */
	else if (swap_pager_clean(m, B_WRITE) ||
		 queue_empty(&swap_pager_free)) {
#ifdef DEBUG
		if ((swpagerdebug & SDB_ANOM) &&
		    !queue_empty(&swap_pager_free))
			printf("swap_pager_io: page %x already cleaning\n", m);
#endif
		return(VM_PAGER_FAIL);
	}

	/*
	 * Determine swap block and allocate as necessary.
	 */
	off = m->offset + m->object->paging_offset;
	ix = off / dbtob(swp->sw_bsize);
	if (swp->sw_blocks == NULL || ix >= swp->sw_nblocks) {
#ifdef DEBUG
		if (swpagerdebug & SDB_FAIL)
			printf("swpg_io: bad offset %x+%x(%d) in %x\n",
			       m->offset, m->object->paging_offset,
			       ix, swp->sw_blocks);
#endif
		return(VM_PAGER_FAIL);
	}
	swb = &swp->sw_blocks[ix];
	off = off % dbtob(swp->sw_bsize);
	if (flags & B_READ) {
		if (swb->swb_block == 0 ||
		    (swb->swb_mask & (1 << atop(off))) == 0) {
#ifdef DEBUG
			if (swpagerdebug & (SDB_ALLOCBLK|SDB_FAIL))
				printf("swpg_io: %x bad read: blk %x+%x, mask %x, off %x+%x\n",
				       swp->sw_blocks,
				       swb->swb_block, atop(off),
				       swb->swb_mask,
				       m->offset, m->object->paging_offset);
#endif
			/* XXX: should we zero page here?? */
			return(VM_PAGER_FAIL);
		}
	} else if (swb->swb_block == 0) {
		swb->swb_block = rmalloc(swapmap, swp->sw_bsize);
		if (swb->swb_block == 0) {
#ifdef DEBUG
			if (swpagerdebug & SDB_FAIL)
				printf("swpg_io: rmalloc of %x failed\n",
				       swp->sw_bsize);
#endif
			return(VM_PAGER_FAIL);
		}
#ifdef DEBUG
		if (swpagerdebug & (SDB_FULL|SDB_ALLOCBLK))
			printf("swpg_io: %x alloc blk %x at ix %x\n",
			       swp->sw_blocks, swb->swb_block, ix);
#endif
	}

	/*
	 * Allocate a kernel virtual address and initialize it so that
	 * the PTE is available for lower level IO drivers.
	 */
	kva = vm_pager_map_page(m);

	/*
	 * Get a swap buffer header and perform the IO
	 */
	s = splbio();
	while (bswlist.av_forw == NULL) {
#ifdef DEBUG
		if (swpagerdebug & SDB_ANOM)
			printf("swap_pager_io: wait on swbuf for %x (%d)\n",
			       m, flags);
#endif
		bswlist.b_flags |= B_WANTED;
		sleep((caddr_t)&bswlist, PSWP+1);
	}
	bp = bswlist.av_forw;
	bswlist.av_forw = bp->av_forw;
	splx(s);
	bp->b_flags = B_BUSY | (flags & B_READ);
	bp->b_proc = &proc0;	/* XXX (but without B_PHYS set this is ok) */
	bp->b_un.b_addr = (caddr_t)kva;
	bp->b_blkno = swb->swb_block + btodb(off);
	VHOLD(swapdev_vp);
	bp->b_vp = swapdev_vp;
	if (swapdev_vp->v_type == VBLK)
		bp->b_dev = swapdev_vp->v_rdev;
	bp->b_bcount = PAGE_SIZE;
	if ((bp->b_flags & B_READ) == 0)
		swapdev_vp->v_numoutput++;

	/*
	 * If this is an async write we set up additional buffer fields
	 * and place a "cleaning" entry on the inuse queue.
	 */
	if ((flags & (B_READ|B_ASYNC)) == B_ASYNC) {
#ifdef DEBUG
		if (queue_empty(&swap_pager_free))
			panic("swpg_io: lost spc");
#endif
		queue_remove_first(&swap_pager_free,
				   spc, swp_clean_t, spc_list);
#ifdef DEBUG
		if (spc->spc_flags != SPC_FREE)
			panic("swpg_io: bad free spc");
#endif
		spc->spc_flags = SPC_BUSY;
		spc->spc_bp = bp;
		spc->spc_swp = swp;
		spc->spc_kva = kva;
		spc->spc_m = m;
		bp->b_flags |= B_CALL;
		bp->b_iodone = swap_pager_iodone;
		s = splbio();
		swp->sw_poip++;
		queue_enter(&swap_pager_inuse, spc, swp_clean_t, spc_list);

#ifdef DEBUG
		swap_pager_poip++;
		if (swpagerdebug & SDB_WRITE)
			printf("swpg_io: write: bp=%x swp=%x spc=%x poip=%d\n",
			       bp, swp, spc, swp->sw_poip);
		if ((swpagerdebug & SDB_ALLOCBLK) &&
		    (swb->swb_mask & (1 << atop(off))) == 0)
			printf("swpg_io: %x write blk %x+%x\n",
			       swp->sw_blocks, swb->swb_block, atop(off));
#endif
		swb->swb_mask |= (1 << atop(off));
		splx(s);
	}
#ifdef DEBUG
	if (swpagerdebug & SDB_IO)
		printf("swpg_io: IO start: bp %x, db %x, va %x, pa %x\n",
		       bp, swb->swb_block+btodb(off), kva, VM_PAGE_TO_PHYS(m));
#endif
	VOP_STRATEGY(bp);
	if ((flags & (B_READ|B_ASYNC)) == B_ASYNC) {
#ifdef DEBUG
		if (swpagerdebug & SDB_IO)
			printf("swpg_io: IO started: bp %x\n", bp);
#endif
		return(VM_PAGER_PEND);
	}
	s = splbio();
#ifdef DEBUG
	if (flags & B_READ)
		swap_pager_piip++;
	else
		swap_pager_poip++;
#endif
	while ((bp->b_flags & B_DONE) == 0) {
		assert_wait((int)bp);
		thread_block();
	}
#ifdef DEBUG
	if (flags & B_READ)
		--swap_pager_piip;
	else
		--swap_pager_poip;
#endif
	rv = (bp->b_flags & B_ERROR) ? VM_PAGER_FAIL : VM_PAGER_OK;
	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
	bp->av_forw = bswlist.av_forw;
	bswlist.av_forw = bp;
	if (bp->b_vp)
		brelvp(bp);
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		thread_wakeup((int)&bswlist);
	}
	if ((flags & B_READ) == 0 && rv == VM_PAGER_OK) {
		m->clean = TRUE;
		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	}
	splx(s);
#ifdef DEBUG
	if (swpagerdebug & SDB_IO)
		printf("swpg_io: IO done: bp %x, rv %d\n", bp, rv);
	if ((swpagerdebug & SDB_FAIL) && rv == VM_PAGER_FAIL)
		printf("swpg_io: IO error\n");
#endif
	vm_pager_unmap_page(kva);
	return(rv);
}

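/*
 * Reap completed async pageouts: walk the inuse list, finishing any
 * entries marked SPC_DONE and returning them to the free list.
 * Returns TRUE if the given page (if any) was found on the list,
 * i.e. it still appears to be in the middle of a clean.
 */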
boolean_t
swap_pager_clean(m, rw)
	vm_page_t m;
	int rw;
{
	register swp_clean_t spc, tspc;
	register int s;

#ifdef DEBUG
	/* save panic time state */
	if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
		return;
	if (swpagerdebug & SDB_FOLLOW)
		printf("swpg_clean(%x, %d)\n", m, rw);
#endif
	tspc = NULL;
	for (;;) {
		/*
		 * Lookup and removal from the inuse list must be done
		 * at splbio() to avoid conflicts with swap_pager_iodone.
		 */
		s = splbio();
		spc = (swp_clean_t) queue_first(&swap_pager_inuse);
		while (!queue_end(&swap_pager_inuse, (queue_entry_t)spc)) {
			if ((spc->spc_flags & SPC_DONE) &&
			    swap_pager_finish(spc)) {
				queue_remove(&swap_pager_inuse, spc,
					     swp_clean_t, spc_list);
				break;
			}
			if (m && m == spc->spc_m) {
#ifdef DEBUG
				if (swpagerdebug & SDB_ANOM)
					printf("swap_pager_clean: page %x on list, flags %x\n",
					       m, spc->spc_flags);
#endif
				tspc = spc;
			}
			spc = (swp_clean_t) queue_next(&spc->spc_list);
		}

		/*
		 * No operations done, that's all we can do for now.
		 */
		if (queue_end(&swap_pager_inuse, (queue_entry_t)spc))
			break;
		splx(s);

		/*
		 * The desired page was found to be busy earlier in
		 * the scan but has since completed.
		 */
		if (tspc && tspc == spc) {
#ifdef DEBUG
			if (swpagerdebug & SDB_ANOM)
				printf("swap_pager_clean: page %x done while looking\n",
				       m);
#endif
			tspc = NULL;
		}
		spc->spc_flags = SPC_FREE;
		vm_pager_unmap_page(spc->spc_kva);
		queue_enter(&swap_pager_free, spc, swp_clean_t, spc_list);
#ifdef DEBUG
		if (swpagerdebug & SDB_WRITE)
			printf("swpg_clean: free spc %x\n", spc);
#endif
	}
#ifdef DEBUG
	/*
	 * If we found that the desired page is already being cleaned,
	 * mark it so that swap_pager_iodone() will not set the clean
	 * flag before the pageout daemon has another chance to clean it.
	 */
	if (tspc && rw == B_WRITE) {
		if (swpagerdebug & SDB_ANOM)
			printf("swap_pager_clean: page %x on clean list\n",
			       tspc);
		tspc->spc_flags |= SPC_DIRTY;
	}
#endif
	splx(s);

#ifdef DEBUG
	if (swpagerdebug & SDB_WRITE)
		printf("swpg_clean: return %d\n", tspc ? TRUE : FALSE);
	if ((swpagerdebug & SDB_ANOM) && tspc)
		printf("swpg_clean: %s of cleaning page %x\n",
		       rw == B_READ ? "get" : "put", m);
#endif
	return(tspc ? TRUE : FALSE);
}

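/*
 * Complete a single pageout: drop the object's paging_in_progress
 * count, mark the page clean (or leave it in the laundry on error)
 * and wake up anyone waiting on it.  Returns 0 if the object lock
 * could not be taken, in which case the caller will retry later.
 */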
swap_pager_finish(spc)
	register swp_clean_t spc;
{
	vm_object_t object = spc->spc_m->object;

	/*
	 * Mark the paging operation as done.
	 * (XXX) If we cannot get the lock, leave it until later.
	 * (XXX) Also we are assuming that an async write is a
	 * pageout operation that has incremented the counter.
	 */
	if (!vm_object_lock_try(object))
		return(0);

	if (--object->paging_in_progress == 0)
		thread_wakeup((int) object);

#ifdef DEBUG
	/*
	 * XXX: this isn't even close to the right thing to do,
	 * it introduces a variety of race conditions.
	 *
	 * If dirty, vm_pageout() has attempted to clean the page
	 * again.  In this case we do not do anything as we will
	 * see the page again shortly.
	 */
	if (spc->spc_flags & SPC_DIRTY) {
		if (swpagerdebug & SDB_ANOM)
			printf("swap_pager_finish: page %x dirty again\n",
			       spc->spc_m);
		spc->spc_m->busy = FALSE;
		PAGE_WAKEUP(spc->spc_m);
		vm_object_unlock(object);
		return(1);
	}
#endif
	/*
	 * If no error, mark as clean and inform the pmap system.
	 * If error, mark as dirty so we will try again.
	 * (XXX could get stuck doing this, should give up after a while)
	 */
	if (spc->spc_flags & SPC_ERROR) {
		printf("swap_pager_finish: clean of page %x failed\n",
		       VM_PAGE_TO_PHYS(spc->spc_m));
		spc->spc_m->laundry = TRUE;
	} else {
		spc->spc_m->clean = TRUE;
		pmap_clear_modify(VM_PAGE_TO_PHYS(spc->spc_m));
	}
	spc->spc_m->busy = FALSE;
	PAGE_WAKEUP(spc->spc_m);

	vm_object_unlock(object);
	return(1);
}

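/*
 * Biodone routine for async swap writes: locate the cleaning entry for
 * this buffer, mark it SPC_DONE (and SPC_ERROR if the write failed),
 * return the buffer header to bswlist, and wake up anyone waiting on
 * the pager as well as the pageout daemon.
 */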
swap_pager_iodone(bp)
	register struct buf *bp;
{
	register swp_clean_t spc;
	daddr_t blk;
	int s;

#ifdef DEBUG
	/* save panic time state */
	if ((swpagerdebug & SDB_ANOMPANIC) && panicstr)
		return;
	if (swpagerdebug & SDB_FOLLOW)
		printf("swpg_iodone(%x)\n", bp);
#endif
	s = splbio();
	spc = (swp_clean_t) queue_first(&swap_pager_inuse);
	while (!queue_end(&swap_pager_inuse, (queue_entry_t)spc)) {
		if (spc->spc_bp == bp)
			break;
		spc = (swp_clean_t) queue_next(&spc->spc_list);
	}
#ifdef DEBUG
	if (queue_end(&swap_pager_inuse, (queue_entry_t)spc))
		panic("swap_pager_iodone: bp not found");
#endif

	spc->spc_flags &= ~SPC_BUSY;
	spc->spc_flags |= SPC_DONE;
	if (bp->b_flags & B_ERROR)
		spc->spc_flags |= SPC_ERROR;
	spc->spc_bp = NULL;
	blk = bp->b_blkno;

#ifdef DEBUG
	--swap_pager_poip;
	if (swpagerdebug & SDB_WRITE)
		printf("swpg_iodone: bp=%x swp=%x flags=%x spc=%x poip=%x\n",
		       bp, spc->spc_swp, spc->spc_swp->sw_flags,
		       spc, spc->spc_swp->sw_poip);
#endif

	spc->spc_swp->sw_poip--;
	if (spc->spc_swp->sw_flags & SW_WANTED) {
		spc->spc_swp->sw_flags &= ~SW_WANTED;
		thread_wakeup((int)spc->spc_swp);
	}

	bp->b_flags &= ~(B_BUSY|B_WANTED|B_PHYS|B_PAGET|B_UAREA|B_DIRTY);
	bp->av_forw = bswlist.av_forw;
	bswlist.av_forw = bp;
	if (bp->b_vp)
		brelvp(bp);
	if (bswlist.b_flags & B_WANTED) {
		bswlist.b_flags &= ~B_WANTED;
		thread_wakeup((int)&bswlist);
	}
	thread_wakeup((int) &vm_pages_needed);
	splx(s);
}
#endif