/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1984, 1986, 1987, 1988, 1989 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * University Copyright- Copyright (c) 1982, 1986, 1988
 * The Regents of the University of California
 * All Rights Reserved
 *
 * University Acknowledgment- Portions of this document are derived from
 * software developed by the University of California, Berkeley, and its
 * contributors.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * VM - paged vnode.
 *
 * This file supplies vm support for the vnode operations that deal with pages.
 */
#include <sys/types.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#include <sys/vmsystm.h>
#include <sys/mman.h>
#include <sys/vfs.h>
#include <sys/cred.h>
#include <sys/user.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/cpuvar.h>
#include <sys/vtrace.h>
#include <sys/tnf_probe.h>

#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg.h>
#include <vm/rm.h>
#include <vm/pvn.h>
#include <vm/page.h>
#include <vm/seg_map.h>
#include <vm/seg_kmem.h>
#include <sys/fs/swapnode.h>

int pvn_nofodklust = 0;
int pvn_write_noklust = 0;

uint_t pvn_vmodsort_supported = 0;	/* set if HAT supports VMODSORT */
uint_t pvn_vmodsort_disable = 0;	/* set in /etc/system to disable HAT */
					/* support for vmodsort for testing */

static struct kmem_cache *marker_cache = NULL;

/*
 * Find the largest contiguous block which contains `addr' for file offset
 * `off' in it while living within the file system block sizes (`vp_off'
 * and `vp_len') and the address space limits for which no pages currently
 * exist and which map to consecutive file offsets.
 */
page_t *
pvn_read_kluster(
	struct vnode *vp,
	u_offset_t off,
	struct seg *seg,
	caddr_t addr,
	u_offset_t *offp,		/* return values */
	size_t *lenp,			/* return values */
	u_offset_t vp_off,
	size_t vp_len,
	int isra)
{
	ssize_t deltaf, deltab;
	page_t *pp;
	page_t *plist = NULL;
	spgcnt_t pagesavail;
	u_offset_t vp_end;

	ASSERT(off >= vp_off && off < vp_off + vp_len);

	/*
	 * We only want to do klustering/read ahead if there
	 * are more than minfree pages currently available.
	 */
	pagesavail = freemem - minfree;

	if (pagesavail <= 0)
		if (isra)
			return ((page_t *)NULL);    /* ra case - give up */
		else
			pagesavail = 1;		    /* must return a page */

	/* We calculate in pages instead of bytes due to 32-bit overflows */
	if (pagesavail < (spgcnt_t)btopr(vp_len)) {
		/*
		 * Don't have enough free memory for the
		 * max request, try sizing down vp request.
		 */
		deltab = (ssize_t)(off - vp_off);
		vp_len -= deltab;
		vp_off += deltab;
		if (pagesavail < btopr(vp_len)) {
			/*
			 * Still not enough memory, just settle for
			 * pagesavail which is at least 1.
			 */
			vp_len = ptob(pagesavail);
		}
	}

	vp_end = vp_off + vp_len;
	ASSERT(off >= vp_off && off < vp_end);

	if (isra && SEGOP_KLUSTER(seg, addr, 0))
		return ((page_t *)NULL);	/* segment driver says no */

	if ((plist = page_create_va(vp, off,
	    PAGESIZE, PG_EXCL | PG_WAIT, seg, addr)) == NULL)
		return ((page_t *)NULL);

	if (vp_len <= PAGESIZE || pvn_nofodklust) {
		*offp = off;
		*lenp = MIN(vp_len, PAGESIZE);
	} else {
		/*
		 * Scan back from front by incrementing "deltab" and
		 * comparing "off" with "vp_off + deltab" to avoid
		 * "signed" versus "unsigned" conversion problems.
		 */
		for (deltab = PAGESIZE; off >= vp_off + deltab;
		    deltab += PAGESIZE) {
			/*
			 * Call back to the segment driver to verify that
			 * the klustering/read ahead operation makes sense.
			 */
			if (SEGOP_KLUSTER(seg, addr, -deltab))
				break;		/* page not eligible */
			if ((pp = page_create_va(vp, off - deltab,
			    PAGESIZE, PG_EXCL, seg, addr - deltab))
			    == NULL)
				break;		/* already have the page */
			/*
			 * Add page to front of page list.
			 */
			page_add(&plist, pp);
		}
		deltab -= PAGESIZE;

		/* scan forward from front */
		for (deltaf = PAGESIZE; off + deltaf < vp_end;
		    deltaf += PAGESIZE) {
			/*
			 * Call back to the segment driver to verify that
			 * the klustering/read ahead operation makes sense.
			 */
			if (SEGOP_KLUSTER(seg, addr, deltaf))
				break;		/* page not file extension */
			if ((pp = page_create_va(vp, off + deltaf,
			    PAGESIZE, PG_EXCL, seg, addr + deltaf))
			    == NULL)
				break;		/* already have page */

			/*
			 * Add page to end of page list.
			 */
			page_add(&plist, pp);
			plist = plist->p_next;
		}
		*offp = off = off - deltab;
		*lenp = deltab + deltaf;
		ASSERT(off >= vp_off);

		/*
		 * If we ended up getting more than was actually
		 * requested, retract the returned length to only
		 * reflect what was requested.  This might happen
		 * if we were allowed to kluster pages across a
		 * span of (say) 5 frags, and frag size is less
		 * than PAGESIZE.  We need a whole number of
		 * pages to contain those frags, but the returned
		 * size should only allow the returned range to
		 * extend as far as the end of the frags.
		 */
		if ((vp_off + vp_len) < (off + *lenp)) {
			ASSERT(vp_end > off);
			*lenp = vp_end - off;
		}
	}
	TRACE_3(TR_FAC_VM, TR_PVN_READ_KLUSTER,
	    "pvn_read_kluster:seg %p addr %x isra %x",
	    seg, addr, isra);
	return (plist);
}
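
/*
 * Illustrative sketch (not part of this file): a file system's single-page
 * getpage routine would typically call pvn_read_kluster() to build the page
 * list, start the read, and complete it with pvn_read_done() on error.  The
 * names fs_startio, blkoff and blksz below are hypothetical.
 *
 *	u_offset_t io_off;
 *	size_t io_len;
 *	page_t *pp;
 *
 *	pp = pvn_read_kluster(vp, off, seg, addr, &io_off, &io_len,
 *	    blkoff, blksz, isra);
 *	if (pp != NULL) {
 *		err = fs_startio(vp, pp, io_off, io_len, B_READ);
 *		if (err)
 *			pvn_read_done(pp, B_ERROR);
 *	}
 */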

/*
 * Handle pages for this vnode on either side of the page "pp"
 * which has been locked by the caller.  This routine will also
 * do klustering in the range [vp_off, vp_off + vp_len] up
 * until a page which is not found.  The offset and length
 * of pages included is returned in "*offp" and "*lenp".
 *
 * Returns a list of dirty locked pages all ready to be
 * written back.
 */
page_t *
pvn_write_kluster(
	struct vnode *vp,
	page_t *pp,
	u_offset_t *offp,		/* return values */
	size_t *lenp,			/* return values */
	u_offset_t vp_off,
	size_t vp_len,
	int flags)
{
	u_offset_t off;
	page_t *dirty;
	size_t deltab, deltaf;
	se_t se;
	u_offset_t vp_end;

	off = pp->p_offset;

	/*
	 * Klustering should not be done if we are invalidating
	 * pages since we could destroy pages that belong to
	 * some other process if this is a swap vnode.
	 */
	if (pvn_write_noklust || ((flags & B_INVAL) && IS_SWAPVP(vp))) {
		*offp = off;
		*lenp = PAGESIZE;
		return (pp);
	}

	if (flags & (B_FREE | B_INVAL))
		se = SE_EXCL;
	else
		se = SE_SHARED;

	dirty = pp;
	/*
	 * Scan backwards looking for pages to kluster by incrementing
	 * "deltab" and comparing "off" with "vp_off + deltab" to
	 * avoid "signed" versus "unsigned" conversion problems.
	 */
	for (deltab = PAGESIZE; off >= vp_off + deltab; deltab += PAGESIZE) {
		pp = page_lookup_nowait(vp, off - deltab, se);
		if (pp == NULL)
			break;		/* page not found */
		if (pvn_getdirty(pp, flags | B_DELWRI) == 0)
			break;
		page_add(&dirty, pp);
	}
	deltab -= PAGESIZE;

	vp_end = vp_off + vp_len;
	/* now scan forwards looking for pages to kluster */
	for (deltaf = PAGESIZE; off + deltaf < vp_end; deltaf += PAGESIZE) {
		pp = page_lookup_nowait(vp, off + deltaf, se);
		if (pp == NULL)
			break;		/* page not found */
		if (pvn_getdirty(pp, flags | B_DELWRI) == 0)
			break;
		page_add(&dirty, pp);
		dirty = dirty->p_next;
	}

	*offp = off - deltab;
	*lenp = deltab + deltaf;
	return (dirty);
}
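
/*
 * Illustrative sketch (not part of this file): a file system's putapage
 * routine would typically expand the single dirty page it was handed into
 * a kluster before starting the write.  fs_startio is a hypothetical name.
 *
 *	pp = pvn_write_kluster(vp, pp, &io_off, &io_len, blkoff, blksz,
 *	    flags);
 *	err = fs_startio(vp, pp, io_off, io_len, B_WRITE | flags);
 *	if ((flags & B_ASYNC) == 0)
 *		pvn_write_done(pp, B_WRITE | flags | (err ? B_ERROR : 0));
 */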

/*
 * Generic entry point used to release the "shared/exclusive" lock
 * and the "p_iolock" on pages after i/o is complete.
 */
void
pvn_io_done(page_t *plist)
{
	page_t *pp;

	while (plist != NULL) {
		pp = plist;
		page_sub(&plist, pp);
		page_io_unlock(pp);
		page_unlock(pp);
	}
}

/*
 * Entry point to be used by file system getpage subr's and
 * other such routines which either want to unlock pages (B_ASYNC
 * request) or destroy a list of pages if an error occurred.
 */
void
pvn_read_done(page_t *plist, int flags)
{
	page_t *pp;

	while (plist != NULL) {
		pp = plist;
		page_sub(&plist, pp);
		page_io_unlock(pp);
		if (flags & B_ERROR) {
			/*LINTED: constant in conditional context*/
			VN_DISPOSE(pp, B_INVAL, 0, kcred);
		} else {
			(void) page_release(pp, 0);
		}
	}
}
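
/*
 * Illustrative sketch (not part of this file): after a synchronous read
 * completes, a getapage routine either destroys the still-locked pages
 * with pvn_read_done() on error or hands them back to its caller through
 * pvn_plist_init() (defined later in this file).  bp, pp and pl are the
 * hypothetical caller's locals.
 *
 *	err = biowait(bp);
 *	pageio_done(bp);
 *	if (err) {
 *		pvn_read_done(pp, B_ERROR);
 *		return (err);
 *	}
 *	pvn_plist_init(pp, pl, plsz, off, io_len, rw);
 */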

/*
 * Automagic pageout.
 * When memory gets tight, start freeing pages popping out of the
 * write queue.
 */
int write_free = 1;
pgcnt_t pages_before_pager = 200;	/* LMXXX */

/*
 * Routine to be called when page-out's complete.
 * The caller, typically VOP_PUTPAGE, has to explicitly call this routine
 * after waiting for i/o to complete (biowait) to free the list of
 * pages associated with the buffer.  These pages must be locked
 * before i/o is initiated.
 *
 * If a write error occurs, the pages are marked as modified
 * so the write will be re-tried later.
 */

void
pvn_write_done(page_t *plist, int flags)
{
	int dfree = 0;
	int pgrec = 0;
	int pgout = 0;
	int pgpgout = 0;
	int anonpgout = 0;
	int anonfree = 0;
	int fspgout = 0;
	int fsfree = 0;
	int execpgout = 0;
	int execfree = 0;
	page_t *pp;
	struct cpu *cpup;
	struct vnode *vp = NULL;	/* for probe */
	uint_t ppattr;

	ASSERT((flags & B_READ) == 0);

	/*
	 * If we are about to start paging anyway, start freeing pages.
	 */
	if (write_free && freemem < lotsfree + pages_before_pager &&
	    (flags & B_ERROR) == 0) {
		flags |= B_FREE;
	}

	/*
	 * Handle each page involved in the i/o operation.
	 */
	while (plist != NULL) {
		pp = plist;
		ASSERT(PAGE_LOCKED(pp) && page_iolock_assert(pp));
		page_sub(&plist, pp);

		/* Kernel probe support */
		if (vp == NULL)
			vp = pp->p_vnode;

		if (flags & B_ERROR) {
			/*
			 * Write operation failed.  We don't want
			 * to destroy (or free) the page unless B_FORCE
			 * is set.  We set the mod bit again and release
			 * all locks on the page so that it will get written
			 * back again later when things are hopefully
			 * better again.
			 * If B_INVAL and B_FORCE is set we really have
			 * to destroy the page.
			 */
			if ((flags & (B_INVAL|B_FORCE)) == (B_INVAL|B_FORCE)) {
				page_io_unlock(pp);
				/*LINTED: constant in conditional context*/
				VN_DISPOSE(pp, B_INVAL, 0, kcred);
			} else {
				hat_setmod(pp);
				page_io_unlock(pp);
				page_unlock(pp);
			}
		} else if (flags & B_INVAL) {
			/*
			 * XXX - Failed writes with B_INVAL set are
			 * not handled appropriately.
			 */
			page_io_unlock(pp);
			/*LINTED: constant in conditional context*/
			VN_DISPOSE(pp, B_INVAL, 0, kcred);
		} else if (flags & B_FREE || !hat_page_is_mapped(pp)) {
			/*
			 * Update statistics for pages being paged out
			 */
			if (pp->p_vnode) {
				if (IS_SWAPFSVP(pp->p_vnode)) {
					anonpgout++;
				} else {
					if (pp->p_vnode->v_flag & VVMEXEC) {
						execpgout++;
					} else {
						fspgout++;
					}
				}
			}
			page_io_unlock(pp);
			pgout = 1;
			pgpgout++;
			TRACE_1(TR_FAC_VM, TR_PAGE_WS_OUT,
			    "page_ws_out:pp %p", pp);

			/*
			 * The page_struct_lock need not be acquired to
			 * examine "p_lckcnt" and "p_cowcnt" since we'll
			 * have an "exclusive" lock if the upgrade succeeds.
			 */
			if (page_tryupgrade(pp) &&
			    pp->p_lckcnt == 0 && pp->p_cowcnt == 0) {
				/*
				 * Check if someone has reclaimed the
				 * page.  If ref and mod are not set, no
				 * one is using it so we can free it.
				 * The rest of the system is careful
				 * to use the NOSYNC flag to unload
				 * translations set up for i/o w/o
				 * affecting ref and mod bits.
				 *
				 * Obtain a copy of the real hardware
				 * mod bit using hat_pagesync(pp, HAT_DONTZERO)
				 * to avoid having to flush the cache.
				 */
				ppattr = hat_pagesync(pp, HAT_SYNC_DONTZERO |
				    HAT_SYNC_STOPON_MOD);
			ck_refmod:
				if (!(ppattr & (P_REF | P_MOD))) {
					if (hat_page_is_mapped(pp)) {
						/*
						 * Doesn't look like the page
						 * was modified so now we
						 * really have to unload the
						 * translations.  Meanwhile
						 * another CPU could've
						 * modified it so we have to
						 * check again.  We don't loop
						 * forever here because now
						 * the translations are gone
						 * and no one can get a new one
						 * since we have the "exclusive"
						 * lock on the page.
						 */
						(void) hat_pageunload(pp,
						    HAT_FORCE_PGUNLOAD);
						ppattr = hat_page_getattr(pp,
						    P_REF | P_MOD);
						goto ck_refmod;
					}
					/*
					 * Update statistics for pages being
					 * freed
					 */
					if (pp->p_vnode) {
						if (IS_SWAPFSVP(pp->p_vnode)) {
							anonfree++;
						} else {
							if (pp->p_vnode->v_flag
							    & VVMEXEC) {
								execfree++;
							} else {
								fsfree++;
							}
						}
					}
					/*LINTED: constant in conditional ctx*/
					VN_DISPOSE(pp, B_FREE,
					    (flags & B_DONTNEED), kcred);
					dfree++;
				} else {
					page_unlock(pp);
					pgrec++;
					TRACE_1(TR_FAC_VM, TR_PAGE_WS_FREE,
					    "page_ws_free:pp %p", pp);
				}
			} else {
				/*
				 * Page is either `locked' in memory
				 * or was reclaimed and now has a
				 * "shared" lock, so release it.
				 */
				page_unlock(pp);
			}
		} else {
			/*
			 * Neither B_FREE nor B_INVAL nor B_ERROR.
			 * Just release locks.
			 */
			page_io_unlock(pp);
			page_unlock(pp);
		}
	}

	CPU_STATS_ENTER_K();
	cpup = CPU;	/* get cpup now that CPU cannot change */
	CPU_STATS_ADDQ(cpup, vm, dfree, dfree);
	CPU_STATS_ADDQ(cpup, vm, pgrec, pgrec);
	CPU_STATS_ADDQ(cpup, vm, pgout, pgout);
	CPU_STATS_ADDQ(cpup, vm, pgpgout, pgpgout);
	CPU_STATS_ADDQ(cpup, vm, anonpgout, anonpgout);
	CPU_STATS_ADDQ(cpup, vm, anonfree, anonfree);
	CPU_STATS_ADDQ(cpup, vm, fspgout, fspgout);
	CPU_STATS_ADDQ(cpup, vm, fsfree, fsfree);
	CPU_STATS_ADDQ(cpup, vm, execpgout, execpgout);
	CPU_STATS_ADDQ(cpup, vm, execfree, execfree);
	CPU_STATS_EXIT_K();

	/* Kernel probe */
	TNF_PROBE_4(pageout, "vm pageio io", /* CSTYLED */,
	    tnf_opaque,	vnode,			vp,
	    tnf_ulong,	pages_pageout,		pgpgout,
	    tnf_ulong,	pages_freed,		dfree,
	    tnf_ulong,	pages_reclaimed,	pgrec);
}
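
/*
 * Illustrative sketch (not part of this file): for an async write, the
 * buf's iodone callback is the natural place to complete the pages; a
 * synchronous caller instead biowait()s and then calls pvn_write_done()
 * itself.  fs_iodone is a hypothetical callback name.
 *
 *	static int
 *	fs_iodone(struct buf *bp)
 *	{
 *		int flags = B_WRITE | (bp->b_flags & B_ERROR);
 *		page_t *pp = bp->b_pages;
 *
 *		pageio_done(bp);
 *		pvn_write_done(pp, flags);
 *		return (0);
 *	}
 */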

/*
 * Flags are composed of {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED, B_DELWRI,
 * B_TRUNC, B_FORCE}.  B_DELWRI indicates that this page is part of a kluster
 * operation and is only to be considered if it doesn't involve any
 * waiting here.  B_TRUNC indicates that the file is being truncated
 * and so no i/o needs to be done.  B_FORCE indicates that the page
 * must be destroyed so don't try writing it out.
 *
 * The caller must ensure that the page is locked.  Returns 1, if
 * the page should be written back (the "iolock" is held in this
 * case), or 0 if the page has been dealt with or has been
 * unlocked.
 */
int
pvn_getdirty(page_t *pp, int flags)
{
	ASSERT((flags & (B_INVAL | B_FREE)) ?
	    PAGE_EXCL(pp) : PAGE_SHARED(pp));
	ASSERT(PP_ISFREE(pp) == 0);

	/*
	 * If trying to invalidate or free a logically `locked' page,
	 * forget it.  Don't need page_struct_lock to check p_lckcnt and
	 * p_cowcnt as the page is exclusively locked.
	 */
	if ((flags & (B_INVAL | B_FREE)) && !(flags & (B_TRUNC|B_FORCE)) &&
	    (pp->p_lckcnt != 0 || pp->p_cowcnt != 0)) {
		page_unlock(pp);
		return (0);
	}

	/*
	 * Now acquire the i/o lock so we can add it to the dirty
	 * list (if necessary).  We avoid blocking on the i/o lock
	 * in the following cases:
	 *
	 * If B_DELWRI is set, which implies that this request is
	 * due to a klustering operation.
	 *
	 * If this is an async (B_ASYNC) operation and we are not doing
	 * invalidation (B_INVAL) [The current i/o or fsflush will ensure
	 * that the page is written out].
	 */
	if ((flags & B_DELWRI) || ((flags & (B_INVAL | B_ASYNC)) == B_ASYNC)) {
		if (!page_io_trylock(pp)) {
			page_unlock(pp);
			return (0);
		}
	} else {
		page_io_lock(pp);
	}

	/*
	 * If we want to free or invalidate the page then
	 * we need to unload it so that anyone who wants
	 * it will have to take a minor fault to get it.
	 * Otherwise, we're just writing the page back so we
	 * need to sync up the hardware and software mod bit to
	 * detect any future modifications.  We clear the
	 * software mod bit when we put the page on the dirty
	 * list.
	 */
	if (flags & (B_INVAL | B_FREE)) {
		(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
	} else {
		(void) hat_pagesync(pp, HAT_SYNC_ZERORM);
	}

	if (!hat_ismod(pp) || (flags & B_TRUNC)) {
		/*
		 * Don't need to add it to the
		 * list after all.
		 */
		page_io_unlock(pp);
		if (flags & B_INVAL) {
			/*LINTED: constant in conditional context*/
			VN_DISPOSE(pp, B_INVAL, 0, kcred);
		} else if (flags & B_FREE) {
			/*LINTED: constant in conditional context*/
			VN_DISPOSE(pp, B_FREE, (flags & B_DONTNEED), kcred);
		} else {
			/*
			 * This is advisory path for the callers
			 * of VOP_PUTPAGE() who prefer freeing the
			 * page _only_ if no one else is accessing it.
			 * E.g. segmap_release()
			 *
			 * The above hat_ismod() check is useless because:
			 * (1) we may not be holding SE_EXCL lock;
			 * (2) we've not unloaded _all_ translations
			 *
			 * Let page_release() do the heavy-lifting.
			 */
			(void) page_release(pp, 1);
		}
		return (0);
	}

	/*
	 * Page is dirty, get it ready for the write back
	 * and add page to the dirty list.
	 */
	hat_clrrefmod(pp);

	/*
	 * If we're going to free the page when we're done
	 * then we can let others try to use it starting now.
	 * We'll detect the fact that they used it when the
	 * i/o is done and avoid freeing the page.
	 */
	if (flags & B_FREE)
		page_downgrade(pp);


	TRACE_1(TR_FAC_VM, TR_PVN_GETDIRTY, "pvn_getdirty:pp %p", pp);

	return (1);
}
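
/*
 * Illustrative sketch (not part of this file): the pvn_getdirty() contract
 * as seen from a caller.  On a return of 1 the caller holds the i/o lock
 * and must start the write (both locks are eventually dropped via
 * pvn_write_done()); on 0 the page has already been dealt with and all
 * locks are released.  fs_putapage is a hypothetical name.
 *
 *	if (pvn_getdirty(pp, flags))
 *		err = fs_putapage(vp, pp, &io_off, &io_len, flags, cr);
 */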

/*ARGSUSED*/
static int
marker_constructor(void *buf, void *cdrarg, int kmflags)
{
	page_t *mark = buf;
	bzero(mark, sizeof (page_t));
	return (0);
}

void
pvn_init()
{
	if (pvn_vmodsort_disable == 0)
		pvn_vmodsort_supported = hat_supported(HAT_VMODSORT, NULL);
	marker_cache = kmem_cache_create("marker_cache",
	    sizeof (page_t), 0, marker_constructor,
	    NULL, NULL, NULL, NULL, 0);
}


/*
 * Process a vnode's page list for all pages whose offset is >= off.
 * Pages are to either be free'd, invalidated, or written back to disk.
 *
 * An "exclusive" lock is acquired for each page if B_INVAL or B_FREE
 * is specified, otherwise they are "shared" locked.
 *
 * Flags are {B_ASYNC, B_INVAL, B_FREE, B_DONTNEED, B_TRUNC}
 *
 * Special marker page_t's are inserted in the list in order
 * to keep track of where we are in the list when locks are dropped.
 *
 * Note the list is circular and insertions can happen only at the
 * head and tail of the list.  The algorithm ensures visiting all pages
 * on the list in the following way:
 *
 *    Drop two marker pages at the end of the list.
 *
 *    Move one marker page backwards towards the start of the list until
 *    it is at the list head, processing the pages passed along the way.
 *
 * Due to race conditions when the vphm mutex is dropped, additional pages
 * can be added to either end of the list, so we'll continue to move
 * the marker and process pages until it is up against the end marker.
 *
 * There is one special exit condition.  If we are processing a VMODSORT
 * vnode and only writing back modified pages, we can stop as soon as
 * we run into an unmodified page.  This makes fsync(3) operations fast.
 */
int
pvn_vplist_dirty(
	vnode_t		*vp,
	u_offset_t	off,
	int		(*putapage)(vnode_t *, page_t *, u_offset_t *,
			size_t *, int, cred_t *),
	int		flags,
	cred_t		*cred)
{
	page_t		*pp;
	page_t		*mark;		/* marker page that moves toward head */
	page_t		*end;		/* marker page at end of list */
	int		err = 0;
	int		error;
	kmutex_t	*vphm;
	se_t		se;
	page_t		**where_to_move;

	ASSERT(vp->v_type != VCHR);

	if (vp->v_pages == NULL)
		return (0);


	/*
	 * Serialize vplist_dirty operations on this vnode by setting VVMLOCK.
	 *
	 * Don't block on VVMLOCK if B_ASYNC is set.  This prevents sync()
	 * from getting blocked while flushing pages to a dead NFS server.
	 */
	mutex_enter(&vp->v_lock);
	if ((vp->v_flag & VVMLOCK) && (flags & B_ASYNC)) {
		mutex_exit(&vp->v_lock);
		return (EAGAIN);
	}

	while (vp->v_flag & VVMLOCK)
		cv_wait(&vp->v_cv, &vp->v_lock);

	if (vp->v_pages == NULL) {
		mutex_exit(&vp->v_lock);
		return (0);
	}

	vp->v_flag |= VVMLOCK;
	mutex_exit(&vp->v_lock);


	/*
	 * Set up the marker pages used to walk the list
	 */
	end = kmem_cache_alloc(marker_cache, KM_SLEEP);
	end->p_vnode = vp;
	end->p_offset = (u_offset_t)-2;
	mark = kmem_cache_alloc(marker_cache, KM_SLEEP);
	mark->p_vnode = vp;
	mark->p_offset = (u_offset_t)-1;

	/*
	 * Grab the lock protecting the vnode's page list;
	 * note that this lock is dropped at times in the loop.
	 */
	vphm = page_vnode_mutex(vp);
	mutex_enter(vphm);
	if (vp->v_pages == NULL)
		goto leave;

	/*
	 * insert the markers and loop through the list of pages
	 */
	page_vpadd(&vp->v_pages->p_vpprev->p_vpnext, mark);
	page_vpadd(&mark->p_vpnext, end);
	for (;;) {

		/*
		 * If only doing an async write back, then we can
		 * stop as soon as we get to start of the list.
		 */
		if (flags == B_ASYNC && vp->v_pages == mark)
			break;

		/*
		 * otherwise stop when we've gone through all the pages
		 */
		if (mark->p_vpprev == end)
			break;

		pp = mark->p_vpprev;
		if (vp->v_pages == pp)
			where_to_move = &vp->v_pages;
		else
			where_to_move = &pp->p_vpprev->p_vpnext;

		ASSERT(pp->p_vnode == vp);

		/*
		 * Skip this page if the offset is out of the desired range.
		 * Just move the marker and continue.
		 */
		if (pp->p_offset < off) {
			page_vpsub(&vp->v_pages, mark);
			page_vpadd(where_to_move, mark);
			continue;
		}

		/*
		 * If just flushing dirty pages to disk and this vnode
		 * is using a sorted list of pages, we can stop processing
		 * as soon as we find an unmodified page, since all the
		 * modified pages are visited first.
		 */
		if (IS_VMODSORT(vp) &&
		    !(flags & (B_INVAL | B_FREE | B_TRUNC)) &&
		    !hat_ismod(pp)) {
#ifdef DEBUG
			/*
			 * For debug kernels examine what should be all the
			 * remaining clean pages, asserting that they are
			 * not modified.
			 */
			page_t	*chk = pp;
			int	attr;

			page_vpsub(&vp->v_pages, mark);
			page_vpadd(where_to_move, mark);
			do {
				chk = chk->p_vpprev;
				ASSERT(chk != end);
				if (chk == mark)
					continue;
				attr = hat_page_getattr(chk, P_MOD | P_REF);
				if ((attr & P_MOD) == 0)
					continue;
				panic("v_pages list not all clean: "
				    "page_t*=%p vnode=%p off=%lx "
				    "attr=0x%x last clean page_t*=%p\n",
				    (void *)chk, (void *)chk->p_vnode,
				    (long)chk->p_offset, attr, (void *)pp);
			} while (chk != vp->v_pages);
#endif
			break;
		}

		/*
		 * If we are supposed to invalidate or free this
		 * page, then we need an exclusive lock.
		 */
		se = (flags & (B_INVAL | B_FREE)) ? SE_EXCL : SE_SHARED;

		/*
		 * We must acquire the page lock for all synchronous
		 * operations (invalidate, free and write).
		 */
		if ((flags & B_INVAL) != 0 || (flags & B_ASYNC) == 0) {
			/*
			 * If the page_lock() drops the mutex
			 * we must retry the loop.
			 */
			if (!page_lock(pp, se, vphm, P_NO_RECLAIM))
				continue;

			/*
			 * It's ok to move the marker page now.
			 */
			page_vpsub(&vp->v_pages, mark);
			page_vpadd(where_to_move, mark);
		} else {

			/*
			 * update the marker page for all remaining cases
			 */
			page_vpsub(&vp->v_pages, mark);
			page_vpadd(where_to_move, mark);

			/*
			 * For write backs, if we can't lock the page, it's
			 * invalid or in the process of being destroyed.  Skip
			 * it, assuming someone else is writing it.
			 */
			if (!page_trylock(pp, se))
				continue;
		}

		ASSERT(pp->p_vnode == vp);

		/*
		 * Successfully locked the page, now figure out what to
		 * do with it.  Free pages are easily dealt with, invalidate
		 * if desired or just go on to the next page.
		 */
		if (PP_ISFREE(pp)) {
			if ((flags & B_INVAL) == 0) {
				page_unlock(pp);
				continue;
			}

			/*
			 * Invalidate (destroy) the page.
			 */
			mutex_exit(vphm);
			page_destroy_free(pp);
			mutex_enter(vphm);
			continue;
		}

		/*
		 * pvn_getdirty() figures out what to do with a dirty page.
		 * If the page is dirty, the putapage() routine will write it
		 * and will kluster any other adjacent dirty pages it can.
		 *
		 * pvn_getdirty() and `(*putapage)' unlock the page.
		 */
		mutex_exit(vphm);
		if (pvn_getdirty(pp, flags)) {
			error = (*putapage)(vp, pp, NULL, NULL, flags, cred);
			if (!err)
				err = error;
		}
		mutex_enter(vphm);
	}
	page_vpsub(&vp->v_pages, mark);
	page_vpsub(&vp->v_pages, end);

leave:
	/*
	 * Release v_pages mutex, also VVMLOCK and wakeup blocked threads
	 */
	mutex_exit(vphm);
	kmem_cache_free(marker_cache, mark);
	kmem_cache_free(marker_cache, end);
	mutex_enter(&vp->v_lock);
	vp->v_flag &= ~VVMLOCK;
	cv_broadcast(&vp->v_cv);
	mutex_exit(&vp->v_lock);
	return (err);
}
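
/*
 * Illustrative sketch (not part of this file): a typical VOP_PUTPAGE
 * implementation delegates the len == 0 ("whole file") case to
 * pvn_vplist_dirty() and walks the requested range page by page
 * otherwise.  fs_putpage and fs_putapage are hypothetical names.
 *
 *	static int
 *	fs_putpage(vnode_t *vp, offset_t off, size_t len, int flags,
 *	    cred_t *cr)
 *	{
 *		if (len == 0)
 *			return (pvn_vplist_dirty(vp, (u_offset_t)off,
 *			    fs_putapage, flags, cr));
 *		... range case: page_lookup() + pvn_getdirty() +
 *		    fs_putapage() for each page in [off, off + len) ...
 *	}
 */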

/*
 * Zero out zbytes worth of data.  Caller should be aware that this
 * routine may enter back into the fs layer (xxx_getpage).  Locks
 * that the xxx_getpage routine may need should not be held while
 * calling this.
 */
void
pvn_vpzero(struct vnode *vp, u_offset_t vplen, size_t zbytes)
{
	caddr_t addr;

	ASSERT(vp->v_type != VCHR);

	if (vp->v_pages == NULL)
		return;

	/*
	 * zbytes may be zero but there still may be some portion of
	 * a page which needs clearing (since zbytes is a function
	 * of filesystem block size, not pagesize.)
	 */
	if (zbytes == 0 && (PAGESIZE - (vplen & PAGEOFFSET)) == 0)
		return;

	/*
	 * We get the last page and handle the partial
	 * zeroing via kernel mappings.  This will make the page
	 * dirty so that we know that when this page is written
	 * back, the zeroed information will go out with it.  If
	 * the page is not currently in memory, then the kzero
	 * operation will cause it to be brought in.  We use kzero
	 * instead of bzero so that if the page cannot be read in
	 * for any reason, the system will not panic.  We need
	 * to zero out a minimum of the fs given zbytes, but we
	 * might also have to do more to get the entire last page.
	 */

	if ((zbytes + (vplen & MAXBOFFSET)) > MAXBSIZE)
		panic("pvn_vptrunc zbytes");
	addr = segmap_getmapflt(segkmap, vp, vplen,
	    MAX(zbytes, PAGESIZE - (vplen & PAGEOFFSET)), 1, S_WRITE);
	(void) kzero(addr + (vplen & MAXBOFFSET),
	    MAX(zbytes, PAGESIZE - (vplen & PAGEOFFSET)));
	(void) segmap_release(segkmap, addr, SM_WRITE | SM_ASYNC);
}

/*
 * Handles common work of the VOP_GETPAGE routines when more than
 * one page must be returned by calling a file system specific operation
 * to do most of the work.  Must be called with the vp already locked
 * by the VOP_GETPAGE routine.
 */
int
pvn_getpages(
	int (*getpage)(vnode_t *, u_offset_t, size_t, uint_t *, page_t *[],
		size_t, struct seg *, caddr_t, enum seg_rw, cred_t *),
	struct vnode *vp,
	u_offset_t off,
	size_t len,
	uint_t *protp,
	page_t *pl[],
	size_t plsz,
	struct seg *seg,
	caddr_t addr,
	enum seg_rw rw,
	struct cred *cred)
{
	page_t **ppp;
	u_offset_t o, eoff;
	size_t sz, xlen;
	int err;

	ASSERT(plsz >= len);	/* ensure that we have enough space */

	/*
	 * Loop one page at a time and let getapage function fill
	 * in the next page in array.  We only allow one page to be
	 * returned at a time (except for the last page) so that we
	 * don't have any problems with duplicates and other such
	 * painful problems.  This is a very simple minded algorithm,
	 * but it does the job correctly.  We hope that the cost of a
	 * getapage call for a resident page that we might have been
	 * able to get from an earlier call doesn't cost too much.
	 */
	ppp = pl;
	sz = PAGESIZE;
	eoff = off + len;
	xlen = len;
	for (o = off; o < eoff; o += PAGESIZE, addr += PAGESIZE,
	    xlen -= PAGESIZE) {
		if (o + PAGESIZE >= eoff) {
			/*
			 * Last time through - allow all of
			 * what's left of the pl[] array to be used.
			 */
			sz = plsz - (o - off);
		}
		err = (*getpage)(vp, o, xlen, protp, ppp, sz, seg, addr,
		    rw, cred);
		if (err) {
			/*
			 * Release any pages we already got.
			 */
			if (o > off && pl != NULL) {
				for (ppp = pl; *ppp != NULL; *ppp++ = NULL)
					(void) page_release(*ppp, 1);
			}
			break;
		}
		if (pl != NULL)
			ppp++;
	}
	return (err);
}
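
/*
 * Illustrative sketch (not part of this file): a VOP_GETPAGE implementation
 * commonly handles the single-page case directly and hands multi-page
 * requests to pvn_getpages(), passing its own single-page routine.
 * fs_getapage is a hypothetical name.
 *
 *	if (len <= PAGESIZE)
 *		err = fs_getapage(vp, off, len, protp, pl, plsz,
 *		    seg, addr, rw, cr);
 *	else
 *		err = pvn_getpages(fs_getapage, vp, off, len, protp,
 *		    pl, plsz, seg, addr, rw, cr);
 */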

/*
 * Initialize the page list array.
 */
void
pvn_plist_init(page_t *pp, page_t *pl[], size_t plsz,
    u_offset_t off, size_t io_len, enum seg_rw rw)
{
	ssize_t sz;
	page_t *ppcur, **ppp;

	if (plsz >= io_len) {
		/*
		 * Everything fits, set up to load
		 * all the pages.
		 */
		sz = io_len;
	} else {
		/*
		 * Set up to load plsz worth
		 * starting at the needed page.
		 */
		while (pp->p_offset != off) {
			/* XXX - Do we need this assert? */
			ASSERT(pp->p_next->p_offset !=
			    pp->p_offset);
			/*
			 * Remove page from the i/o list,
			 * release the i/o and the page lock.
			 */
			ppcur = pp;
			page_sub(&pp, ppcur);
			page_io_unlock(ppcur);
			(void) page_release(ppcur, 1);
		}
		sz = plsz;
	}

	/*
	 * Initialize the page list array.
	 */
	ppp = pl;
	do {
		ppcur = pp;
		*ppp++ = ppcur;
		page_sub(&pp, ppcur);
		page_io_unlock(ppcur);
		if (rw != S_CREATE)
			page_downgrade(ppcur);
		sz -= PAGESIZE;
	} while (sz > 0 && pp != NULL);
	*ppp = NULL;		/* terminate list */

	/*
	 * Now free the remaining pages that weren't
	 * loaded in the page list.
	 */
	while (pp != NULL) {
		ppcur = pp;
		page_sub(&pp, ppcur);
		page_io_unlock(ppcur);
		(void) page_release(ppcur, 1);
	}
}