1 /* $NetBSD: uvm_vnode.c,v 1.19 1999/03/04 06:48:54 chs Exp $ */ 2 3 /* 4 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE! 5 * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<< 6 */ 7 /* 8 * Copyright (c) 1997 Charles D. Cranor and Washington University. 9 * Copyright (c) 1991, 1993 10 * The Regents of the University of California. 11 * Copyright (c) 1990 University of Utah. 12 * 13 * All rights reserved. 14 * 15 * This code is derived from software contributed to Berkeley by 16 * the Systems Programming Group of the University of Utah Computer 17 * Science Department. 18 * 19 * Redistribution and use in source and binary forms, with or without 20 * modification, are permitted provided that the following conditions 21 * are met: 22 * 1. Redistributions of source code must retain the above copyright 23 * notice, this list of conditions and the following disclaimer. 24 * 2. Redistributions in binary form must reproduce the above copyright 25 * notice, this list of conditions and the following disclaimer in the 26 * documentation and/or other materials provided with the distribution. 27 * 3. All advertising materials mentioning features or use of this software 28 * must display the following acknowledgement: 29 * This product includes software developed by Charles D. Cranor, 30 * Washington University, the University of California, Berkeley and 31 * its contributors. 32 * 4. Neither the name of the University nor the names of its contributors 33 * may be used to endorse or promote products derived from this software 34 * without specific prior written permission. 35 * 36 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 37 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 38 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 39 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 40 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 41 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 42 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 43 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 44 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 45 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 46 * SUCH DAMAGE. 47 * 48 * @(#)vnode_pager.c 8.8 (Berkeley) 2/13/94 49 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp 50 */ 51 52 #include "fs_nfs.h" 53 #include "opt_uvmhist.h" 54 55 /* 56 * uvm_vnode.c: the vnode pager. 57 */ 58 59 #include <sys/param.h> 60 #include <sys/systm.h> 61 #include <sys/proc.h> 62 #include <sys/malloc.h> 63 #include <sys/vnode.h> 64 #include <sys/disklabel.h> 65 #include <sys/ioctl.h> 66 #include <sys/fcntl.h> 67 #include <sys/conf.h> 68 69 #include <miscfs/specfs/specdev.h> 70 71 #include <vm/vm.h> 72 #include <vm/vm_page.h> 73 #include <vm/vm_kern.h> 74 75 #include <uvm/uvm.h> 76 #include <uvm/uvm_vnode.h> 77 78 /* 79 * private global data structure 80 * 81 * we keep a list of writeable active vnode-backed VM objects for sync op. 82 * we keep a simpleq of vnodes that are currently being sync'd. 83 */ 84 85 LIST_HEAD(uvn_list_struct, uvm_vnode); 86 static struct uvn_list_struct uvn_wlist; /* writeable uvns */ 87 static simple_lock_data_t uvn_wl_lock; /* locks uvn_wlist */ 88 89 SIMPLEQ_HEAD(uvn_sq_struct, uvm_vnode); 90 static struct uvn_sq_struct uvn_sync_q; /* sync'ing uvns */ 91 lock_data_t uvn_sync_lock; /* locks sync operation */ 92 93 /* 94 * functions 95 */ 96 97 static int uvn_asyncget __P((struct uvm_object *, vaddr_t, 98 int)); 99 struct uvm_object *uvn_attach __P((void *, vm_prot_t)); 100 static void uvn_cluster __P((struct uvm_object *, vaddr_t, 101 vaddr_t *, vaddr_t *)); 102 static void uvn_detach __P((struct uvm_object *)); 103 static boolean_t uvn_flush __P((struct uvm_object *, vaddr_t, 104 vaddr_t, int)); 105 static int uvn_get __P((struct uvm_object *, vaddr_t, 106 vm_page_t *, int *, int, 107 vm_prot_t, int, int)); 108 static void uvn_init __P((void)); 109 static int uvn_io __P((struct uvm_vnode *, vm_page_t *, 110 int, int, int)); 111 static int uvn_put __P((struct uvm_object *, vm_page_t *, 112 int, boolean_t)); 113 static void uvn_reference __P((struct uvm_object *)); 114 static boolean_t uvn_releasepg __P((struct vm_page *, 115 struct vm_page **)); 116 117 /* 118 * master pager structure 119 */ 120 121 struct uvm_pagerops uvm_vnodeops = { 122 uvn_init, 123 uvn_attach, 124 uvn_reference, 125 uvn_detach, 126 NULL, /* no specialized fault routine required */ 127 uvn_flush, 128 uvn_get, 129 uvn_asyncget, 130 uvn_put, 131 uvn_cluster, 132 uvm_mk_pcluster, /* use generic version of this: see uvm_pager.c */ 133 uvm_shareprot, /* !NULL: allow us in share maps */ 134 NULL, /* AIO-DONE function (not until we have asyncio) */ 135 uvn_releasepg, 136 }; 137 138 /* 139 * the ops! 140 */ 141 142 /* 143 * uvn_init 144 * 145 * init pager private data structures. 146 */ 147 148 static void 149 uvn_init() 150 { 151 152 LIST_INIT(&uvn_wlist); 153 simple_lock_init(&uvn_wl_lock); 154 /* note: uvn_sync_q init'd in uvm_vnp_sync() */ 155 lockinit(&uvn_sync_lock, PVM, "uvnsync", 0, 0); 156 } 157 158 /* 159 * uvn_attach 160 * 161 * attach a vnode structure to a VM object. if the vnode is already 162 * attached, then just bump the reference count by one and return the 163 * VM object. if not already attached, attach and return the new VM obj. 164 * the "accessprot" tells the max access the attaching thread wants to 165 * our pages. 166 * 167 * => caller must _not_ already be holding the lock on the uvm_object. 168 * => in fact, nothing should be locked so that we can sleep here. 169 * => note that uvm_object is first thing in vnode structure, so their 170 * pointers are equiv. 171 */ 172 173 struct uvm_object * 174 uvn_attach(arg, accessprot) 175 void *arg; 176 vm_prot_t accessprot; 177 { 178 struct vnode *vp = arg; 179 struct uvm_vnode *uvn = &vp->v_uvm; 180 struct vattr vattr; 181 int oldflags, result; 182 struct partinfo pi; 183 u_quad_t used_vnode_size; 184 UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist); 185 186 UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0); 187 188 used_vnode_size = (u_quad_t)0; /* XXX gcc -Wuninitialized */ 189 190 /* 191 * first get a lock on the uvn. 192 */ 193 simple_lock(&uvn->u_obj.vmobjlock); 194 while (uvn->u_flags & UVM_VNODE_BLOCKED) { 195 uvn->u_flags |= UVM_VNODE_WANTED; 196 UVMHIST_LOG(maphist, " SLEEPING on blocked vn",0,0,0,0); 197 UVM_UNLOCK_AND_WAIT(uvn, &uvn->u_obj.vmobjlock, FALSE, 198 "uvn_attach", 0); 199 simple_lock(&uvn->u_obj.vmobjlock); 200 UVMHIST_LOG(maphist," WOKE UP",0,0,0,0); 201 } 202 203 /* 204 * if we're mapping a BLK device, make sure it is a disk. 205 */ 206 if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) { 207 simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */ 208 UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0); 209 return(NULL); 210 } 211 212 /* 213 * now we have lock and uvn must not be in a blocked state. 214 * first check to see if it is already active, in which case 215 * we can bump the reference count, check to see if we need to 216 * add it to the writeable list, and then return. 217 */ 218 if (uvn->u_flags & UVM_VNODE_VALID) { /* already active? */ 219 220 /* regain VREF if we were persisting */ 221 if (uvn->u_obj.uo_refs == 0) { 222 VREF(vp); 223 UVMHIST_LOG(maphist," VREF (reclaim persisting vnode)", 224 0,0,0,0); 225 } 226 uvn->u_obj.uo_refs++; /* bump uvn ref! */ 227 228 /* check for new writeable uvn */ 229 if ((accessprot & VM_PROT_WRITE) != 0 && 230 (uvn->u_flags & UVM_VNODE_WRITEABLE) == 0) { 231 simple_lock(&uvn_wl_lock); 232 LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist); 233 simple_unlock(&uvn_wl_lock); 234 /* we are now on wlist! */ 235 uvn->u_flags |= UVM_VNODE_WRITEABLE; 236 } 237 238 /* unlock and return */ 239 simple_unlock(&uvn->u_obj.vmobjlock); 240 UVMHIST_LOG(maphist,"<- done, refcnt=%d", uvn->u_obj.uo_refs, 241 0, 0, 0); 242 return (&uvn->u_obj); 243 } 244 245 /* 246 * need to call VOP_GETATTR() to get the attributes, but that could 247 * block (due to I/O), so we want to unlock the object before calling. 248 * however, we want to keep anyone else from playing with the object 249 * while it is unlocked. to do this we set UVM_VNODE_ALOCK which 250 * prevents anyone from attaching to the vnode until we are done with 251 * it. 252 */ 253 uvn->u_flags = UVM_VNODE_ALOCK; 254 simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock in case we sleep */ 255 /* XXX: curproc? */ 256 257 if (vp->v_type == VBLK) { 258 /* 259 * We could implement this as a specfs getattr call, but: 260 * 261 * (1) VOP_GETATTR() would get the file system 262 * vnode operation, not the specfs operation. 263 * 264 * (2) All we want is the size, anyhow. 265 */ 266 result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, 267 DIOCGPART, (caddr_t)&pi, FREAD, curproc); 268 if (result == 0) { 269 /* XXX should remember blocksize */ 270 used_vnode_size = (u_quad_t)pi.disklab->d_secsize * 271 (u_quad_t)pi.part->p_size; 272 } 273 } else { 274 result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc); 275 if (result == 0) 276 used_vnode_size = vattr.va_size; 277 } 278 279 /* relock object */ 280 simple_lock(&uvn->u_obj.vmobjlock); 281 282 if (result != 0) { 283 if (uvn->u_flags & UVM_VNODE_WANTED) 284 wakeup(uvn); 285 uvn->u_flags = 0; 286 simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */ 287 UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0); 288 return(NULL); 289 } 290 291 /* 292 * make sure that the newsize fits within a vaddr_t 293 * XXX: need to revise addressing data types 294 */ 295 #ifdef DEBUG 296 if (vp->v_type == VBLK) 297 printf("used_vnode_size = %qu\n", (long long)used_vnode_size); 298 #endif 299 if (used_vnode_size > (vaddr_t) -PAGE_SIZE) { 300 #ifdef DEBUG 301 printf("uvn_attach: vn %p size truncated %qx->%x\n", vp, 302 (long long)used_vnode_size, -PAGE_SIZE); 303 #endif 304 used_vnode_size = (vaddr_t) -PAGE_SIZE; 305 } 306 307 /* 308 * now set up the uvn. 309 */ 310 uvn->u_obj.pgops = &uvm_vnodeops; 311 TAILQ_INIT(&uvn->u_obj.memq); 312 uvn->u_obj.uo_npages = 0; 313 uvn->u_obj.uo_refs = 1; /* just us... */ 314 oldflags = uvn->u_flags; 315 uvn->u_flags = UVM_VNODE_VALID|UVM_VNODE_CANPERSIST; 316 uvn->u_nio = 0; 317 uvn->u_size = used_vnode_size; 318 319 /* if write access, we need to add it to the wlist */ 320 if (accessprot & VM_PROT_WRITE) { 321 simple_lock(&uvn_wl_lock); 322 LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist); 323 simple_unlock(&uvn_wl_lock); 324 uvn->u_flags |= UVM_VNODE_WRITEABLE; /* we are on wlist! */ 325 } 326 327 /* 328 * add a reference to the vnode. this reference will stay as long 329 * as there is a valid mapping of the vnode. dropped when the 330 * reference count goes to zero [and we either free or persist]. 331 */ 332 VREF(vp); 333 simple_unlock(&uvn->u_obj.vmobjlock); 334 if (oldflags & UVM_VNODE_WANTED) 335 wakeup(uvn); 336 337 UVMHIST_LOG(maphist,"<- done/VREF, ret 0x%x", &uvn->u_obj,0,0,0); 338 return(&uvn->u_obj); 339 } 340 341 342 /* 343 * uvn_reference 344 * 345 * duplicate a reference to a VM object. Note that the reference 346 * count must already be at least one (the passed in reference) so 347 * there is no chance of the uvn being killed or locked out here. 348 * 349 * => caller must call with object unlocked. 350 * => caller must be using the same accessprot as was used at attach time 351 */ 352 353 354 static void 355 uvn_reference(uobj) 356 struct uvm_object *uobj; 357 { 358 #ifdef DIAGNOSTIC 359 struct uvm_vnode *uvn = (struct uvm_vnode *) uobj; 360 #endif 361 UVMHIST_FUNC("uvn_reference"); UVMHIST_CALLED(maphist); 362 363 simple_lock(&uobj->vmobjlock); 364 #ifdef DIAGNOSTIC 365 if ((uvn->u_flags & UVM_VNODE_VALID) == 0) { 366 printf("uvn_reference: ref=%d, flags=0x%x\n", uvn->u_flags, 367 uobj->uo_refs); 368 panic("uvn_reference: invalid state"); 369 } 370 #endif 371 uobj->uo_refs++; 372 UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)", 373 uobj, uobj->uo_refs,0,0); 374 simple_unlock(&uobj->vmobjlock); 375 } 376 377 /* 378 * uvn_detach 379 * 380 * remove a reference to a VM object. 381 * 382 * => caller must call with object unlocked and map locked. 383 * => this starts the detach process, but doesn't have to finish it 384 * (async i/o could still be pending). 385 */ 386 static void 387 uvn_detach(uobj) 388 struct uvm_object *uobj; 389 { 390 struct uvm_vnode *uvn; 391 struct vnode *vp; 392 int oldflags; 393 UVMHIST_FUNC("uvn_detach"); UVMHIST_CALLED(maphist); 394 395 simple_lock(&uobj->vmobjlock); 396 397 UVMHIST_LOG(maphist," (uobj=0x%x) ref=%d", uobj,uobj->uo_refs,0,0); 398 uobj->uo_refs--; /* drop ref! */ 399 if (uobj->uo_refs) { /* still more refs */ 400 simple_unlock(&uobj->vmobjlock); 401 UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0); 402 return; 403 } 404 405 /* 406 * get other pointers ... 407 */ 408 409 uvn = (struct uvm_vnode *) uobj; 410 vp = (struct vnode *) uobj; 411 412 /* 413 * clear VTEXT flag now that there are no mappings left (VTEXT is used 414 * to keep an active text file from being overwritten). 415 */ 416 vp->v_flag &= ~VTEXT; 417 418 /* 419 * we just dropped the last reference to the uvn. see if we can 420 * let it "stick around". 421 */ 422 423 if (uvn->u_flags & UVM_VNODE_CANPERSIST) { 424 /* won't block */ 425 uvn_flush(uobj, 0, 0, PGO_DEACTIVATE|PGO_ALLPAGES); 426 simple_unlock(&uobj->vmobjlock); 427 vrele(vp); /* drop vnode reference */ 428 UVMHIST_LOG(maphist,"<- done/vrele! (persist)", 0,0,0,0); 429 return; 430 } 431 432 /* 433 * its a goner! 434 */ 435 436 UVMHIST_LOG(maphist," its a goner (flushing)!", 0,0,0,0); 437 438 uvn->u_flags |= UVM_VNODE_DYING; 439 440 /* 441 * even though we may unlock in flush, no one can gain a reference 442 * to us until we clear the "dying" flag [because it blocks 443 * attaches]. we will not do that until after we've disposed of all 444 * the pages with uvn_flush(). note that before the flush the only 445 * pages that could be marked PG_BUSY are ones that are in async 446 * pageout by the daemon. (there can't be any pending "get"'s 447 * because there are no references to the object). 448 */ 449 450 (void) uvn_flush(uobj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES); 451 452 UVMHIST_LOG(maphist," its a goner (done flush)!", 0,0,0,0); 453 454 /* 455 * given the structure of this pager, the above flush request will 456 * create the following state: all the pages that were in the object 457 * have either been free'd or they are marked PG_BUSY|PG_RELEASED. 458 * the PG_BUSY bit was set either by us or the daemon for async I/O. 459 * in either case, if we have pages left we can't kill the object 460 * yet because i/o is pending. in this case we set the "relkill" 461 * flag which will cause pgo_releasepg to kill the object once all 462 * the I/O's are done [pgo_releasepg will be called from the aiodone 463 * routine or from the page daemon]. 464 */ 465 466 if (uobj->uo_npages) { /* I/O pending. iodone will free */ 467 #ifdef DIAGNOSTIC 468 /* 469 * XXXCDC: very unlikely to happen until we have async i/o 470 * so print a little info message in case it does. 471 */ 472 printf("uvn_detach: vn %p has pages left after flush - " 473 "relkill mode\n", uobj); 474 #endif 475 uvn->u_flags |= UVM_VNODE_RELKILL; 476 simple_unlock(&uobj->vmobjlock); 477 UVMHIST_LOG(maphist,"<- done! (releasepg will kill obj)", 0, 0, 478 0, 0); 479 return; 480 } 481 482 /* 483 * kill object now. note that we can't be on the sync q because 484 * all references are gone. 485 */ 486 if (uvn->u_flags & UVM_VNODE_WRITEABLE) { 487 simple_lock(&uvn_wl_lock); /* protect uvn_wlist */ 488 LIST_REMOVE(uvn, u_wlist); 489 simple_unlock(&uvn_wl_lock); 490 } 491 #ifdef DIAGNOSTIC 492 if (uobj->memq.tqh_first != NULL) 493 panic("uvn_deref: vnode VM object still has pages afer " 494 "syncio/free flush"); 495 #endif 496 oldflags = uvn->u_flags; 497 uvn->u_flags = 0; 498 simple_unlock(&uobj->vmobjlock); 499 500 /* wake up any sleepers */ 501 if (oldflags & UVM_VNODE_WANTED) 502 wakeup(uvn); 503 504 /* 505 * drop our reference to the vnode. 506 */ 507 vrele(vp); 508 UVMHIST_LOG(maphist,"<- done (vrele) final", 0,0,0,0); 509 510 return; 511 } 512 513 /* 514 * uvm_vnp_terminate: external hook to clear out a vnode's VM 515 * 516 * called in two cases: 517 * [1] when a persisting vnode vm object (i.e. one with a zero reference 518 * count) needs to be freed so that a vnode can be reused. this 519 * happens under "getnewvnode" in vfs_subr.c. if the vnode from 520 * the free list is still attached (i.e. not VBAD) then vgone is 521 * called. as part of the vgone trace this should get called to 522 * free the vm object. this is the common case. 523 * [2] when a filesystem is being unmounted by force (MNT_FORCE, 524 * "umount -f") the vgone() function is called on active vnodes 525 * on the mounted file systems to kill their data (the vnodes become 526 * "dead" ones [see src/sys/miscfs/deadfs/...]). that results in a 527 * call here (even if the uvn is still in use -- i.e. has a non-zero 528 * reference count). this case happens at "umount -f" and during a 529 * "reboot/halt" operation. 530 * 531 * => the caller must XLOCK and VOP_LOCK the vnode before calling us 532 * [protects us from getting a vnode that is already in the DYING 533 * state...] 534 * => unlike uvn_detach, this function must not return until all the 535 * uvn's pages are disposed of. 536 * => in case [2] the uvn is still alive after this call, but all I/O 537 * ops will fail (due to the backing vnode now being "dead"). this 538 * will prob. kill any process using the uvn due to pgo_get failing. 539 */ 540 541 void 542 uvm_vnp_terminate(vp) 543 struct vnode *vp; 544 { 545 struct uvm_vnode *uvn = &vp->v_uvm; 546 int oldflags; 547 UVMHIST_FUNC("uvm_vnp_terminate"); UVMHIST_CALLED(maphist); 548 549 /* 550 * lock object and check if it is valid 551 */ 552 simple_lock(&uvn->u_obj.vmobjlock); 553 UVMHIST_LOG(maphist, " vp=0x%x, ref=%d, flag=0x%x", vp, 554 uvn->u_obj.uo_refs, uvn->u_flags, 0); 555 if ((uvn->u_flags & UVM_VNODE_VALID) == 0) { 556 simple_unlock(&uvn->u_obj.vmobjlock); 557 UVMHIST_LOG(maphist, "<- done (not active)", 0, 0, 0, 0); 558 return; 559 } 560 561 /* 562 * must be a valid uvn that is not already dying (because XLOCK 563 * protects us from that). the uvn can't in the the ALOCK state 564 * because it is valid, and uvn's that are in the ALOCK state haven't 565 * been marked valid yet. 566 */ 567 568 #ifdef DEBUG 569 /* 570 * debug check: are we yanking the vnode out from under our uvn? 571 */ 572 if (uvn->u_obj.uo_refs) { 573 printf("uvm_vnp_terminate(%p): terminating active vnode " 574 "(refs=%d)\n", uvn, uvn->u_obj.uo_refs); 575 } 576 #endif 577 578 /* 579 * it is possible that the uvn was detached and is in the relkill 580 * state [i.e. waiting for async i/o to finish so that releasepg can 581 * kill object]. we take over the vnode now and cancel the relkill. 582 * we want to know when the i/o is done so we can recycle right 583 * away. note that a uvn can only be in the RELKILL state if it 584 * has a zero reference count. 585 */ 586 587 if (uvn->u_flags & UVM_VNODE_RELKILL) 588 uvn->u_flags &= ~UVM_VNODE_RELKILL; /* cancel RELKILL */ 589 590 /* 591 * block the uvn by setting the dying flag, and then flush the 592 * pages. (note that flush may unlock object while doing I/O, but 593 * it will re-lock it before it returns control here). 594 * 595 * also, note that we tell I/O that we are already VOP_LOCK'd so 596 * that uvn_io doesn't attempt to VOP_LOCK again. 597 * 598 * XXXCDC: setting VNISLOCKED on an active uvn which is being terminated 599 * due to a forceful unmount might not be a good idea. maybe we 600 * need a way to pass in this info to uvn_flush through a 601 * pager-defined PGO_ constant [currently there are none]. 602 */ 603 uvn->u_flags |= UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED; 604 605 (void) uvn_flush(&uvn->u_obj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES); 606 607 /* 608 * as we just did a flush we expect all the pages to be gone or in 609 * the process of going. sleep to wait for the rest to go [via iosync]. 610 */ 611 612 while (uvn->u_obj.uo_npages) { 613 #ifdef DIAGNOSTIC 614 struct vm_page *pp; 615 for (pp = uvn->u_obj.memq.tqh_first ; pp != NULL ; 616 pp = pp->listq.tqe_next) { 617 if ((pp->flags & PG_BUSY) == 0) 618 panic("uvm_vnp_terminate: detected unbusy pg"); 619 } 620 if (uvn->u_nio == 0) 621 panic("uvm_vnp_terminate: no I/O to wait for?"); 622 printf("uvm_vnp_terminate: waiting for I/O to fin.\n"); 623 /* 624 * XXXCDC: this is unlikely to happen without async i/o so we 625 * put a printf in just to keep an eye on it. 626 */ 627 #endif 628 uvn->u_flags |= UVM_VNODE_IOSYNC; 629 UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock, FALSE, 630 "uvn_term",0); 631 simple_lock(&uvn->u_obj.vmobjlock); 632 } 633 634 /* 635 * done. now we free the uvn if its reference count is zero 636 * (true if we are zapping a persisting uvn). however, if we are 637 * terminating a uvn with active mappings we let it live ... future 638 * calls down to the vnode layer will fail. 639 */ 640 641 oldflags = uvn->u_flags; 642 if (uvn->u_obj.uo_refs) { 643 644 /* 645 * uvn must live on it is dead-vnode state until all references 646 * are gone. restore flags. clear CANPERSIST state. 647 */ 648 649 uvn->u_flags &= ~(UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED| 650 UVM_VNODE_WANTED|UVM_VNODE_CANPERSIST); 651 652 } else { 653 654 /* 655 * free the uvn now. note that the VREF reference is already 656 * gone [it is dropped when we enter the persist state]. 657 */ 658 if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED) 659 panic("uvm_vnp_terminate: io sync wanted bit set"); 660 661 if (uvn->u_flags & UVM_VNODE_WRITEABLE) { 662 simple_lock(&uvn_wl_lock); 663 LIST_REMOVE(uvn, u_wlist); 664 simple_unlock(&uvn_wl_lock); 665 } 666 uvn->u_flags = 0; /* uvn is history, clear all bits */ 667 } 668 669 if (oldflags & UVM_VNODE_WANTED) 670 wakeup(uvn); /* object lock still held */ 671 672 simple_unlock(&uvn->u_obj.vmobjlock); 673 UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0); 674 675 } 676 677 /* 678 * uvn_releasepg: handled a released page in a uvn 679 * 680 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need 681 * to dispose of. 682 * => caller must handled PG_WANTED case 683 * => called with page's object locked, pageq's unlocked 684 * => returns TRUE if page's object is still alive, FALSE if we 685 * killed the page's object. if we return TRUE, then we 686 * return with the object locked. 687 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return 688 * with the page queues locked [for pagedaemon] 689 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case] 690 * => we kill the uvn if it is not referenced and we are suppose to 691 * kill it ("relkill"). 692 */ 693 694 boolean_t 695 uvn_releasepg(pg, nextpgp) 696 struct vm_page *pg; 697 struct vm_page **nextpgp; /* OUT */ 698 { 699 struct uvm_vnode *uvn = (struct uvm_vnode *) pg->uobject; 700 #ifdef DIAGNOSTIC 701 if ((pg->flags & PG_RELEASED) == 0) 702 panic("uvn_releasepg: page not released!"); 703 #endif 704 705 /* 706 * dispose of the page [caller handles PG_WANTED] 707 */ 708 pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE); 709 uvm_lock_pageq(); 710 if (nextpgp) 711 *nextpgp = pg->pageq.tqe_next; /* next page for daemon */ 712 uvm_pagefree(pg); 713 if (!nextpgp) 714 uvm_unlock_pageq(); 715 716 /* 717 * now see if we need to kill the object 718 */ 719 if (uvn->u_flags & UVM_VNODE_RELKILL) { 720 if (uvn->u_obj.uo_refs) 721 panic("uvn_releasepg: kill flag set on referenced " 722 "object!"); 723 if (uvn->u_obj.uo_npages == 0) { 724 if (uvn->u_flags & UVM_VNODE_WRITEABLE) { 725 simple_lock(&uvn_wl_lock); 726 LIST_REMOVE(uvn, u_wlist); 727 simple_unlock(&uvn_wl_lock); 728 } 729 #ifdef DIAGNOSTIC 730 if (uvn->u_obj.memq.tqh_first) 731 panic("uvn_releasepg: pages in object with npages == 0"); 732 #endif 733 if (uvn->u_flags & UVM_VNODE_WANTED) 734 /* still holding object lock */ 735 wakeup(uvn); 736 737 uvn->u_flags = 0; /* DEAD! */ 738 simple_unlock(&uvn->u_obj.vmobjlock); 739 return (FALSE); 740 } 741 } 742 return (TRUE); 743 } 744 745 /* 746 * NOTE: currently we have to use VOP_READ/VOP_WRITE because they go 747 * through the buffer cache and allow I/O in any size. These VOPs use 748 * synchronous i/o. [vs. VOP_STRATEGY which can be async, but doesn't 749 * go through the buffer cache or allow I/O sizes larger than a 750 * block]. we will eventually want to change this. 751 * 752 * issues to consider: 753 * uvm provides the uvm_aiodesc structure for async i/o management. 754 * there are two tailq's in the uvm. structure... one for pending async 755 * i/o and one for "done" async i/o. to do an async i/o one puts 756 * an aiodesc on the "pending" list (protected by splbio()), starts the 757 * i/o and returns VM_PAGER_PEND. when the i/o is done, we expect 758 * some sort of "i/o done" function to be called (at splbio(), interrupt 759 * time). this function should remove the aiodesc from the pending list 760 * and place it on the "done" list and wakeup the daemon. the daemon 761 * will run at normal spl() and will remove all items from the "done" 762 * list and call the "aiodone" hook for each done request (see uvm_pager.c). 763 * [in the old vm code, this was done by calling the "put" routine with 764 * null arguments which made the code harder to read and understand because 765 * you had one function ("put") doing two things.] 766 * 767 * so the current pager needs: 768 * int uvn_aiodone(struct uvm_aiodesc *) 769 * 770 * => return KERN_SUCCESS (aio finished, free it). otherwise requeue for 771 * later collection. 772 * => called with pageq's locked by the daemon. 773 * 774 * general outline: 775 * - "try" to lock object. if fail, just return (will try again later) 776 * - drop "u_nio" (this req is done!) 777 * - if (object->iosync && u_naio == 0) { wakeup &uvn->u_naio } 778 * - get "page" structures (atop?). 779 * - handle "wanted" pages 780 * - handle "released" pages [using pgo_releasepg] 781 * >>> pgo_releasepg may kill the object 782 * dont forget to look at "object" wanted flag in all cases. 783 */ 784 785 786 /* 787 * uvn_flush: flush pages out of a uvm object. 788 * 789 * => object should be locked by caller. we may _unlock_ the object 790 * if (and only if) we need to clean a page (PGO_CLEANIT). 791 * we return with the object locked. 792 * => if PGO_CLEANIT is set, we may block (due to I/O). thus, a caller 793 * might want to unlock higher level resources (e.g. vm_map) 794 * before calling flush. 795 * => if PGO_CLEANIT is not set, then we will neither unlock the object 796 * or block. 797 * => if PGO_ALLPAGE is set, then all pages in the object are valid targets 798 * for flushing. 799 * => NOTE: we rely on the fact that the object's memq is a TAILQ and 800 * that new pages are inserted on the tail end of the list. thus, 801 * we can make a complete pass through the object in one go by starting 802 * at the head and working towards the tail (new pages are put in 803 * front of us). 804 * => NOTE: we are allowed to lock the page queues, so the caller 805 * must not be holding the lock on them [e.g. pagedaemon had 806 * better not call us with the queues locked] 807 * => we return TRUE unless we encountered some sort of I/O error 808 * 809 * comment on "cleaning" object and PG_BUSY pages: 810 * this routine is holding the lock on the object. the only time 811 * that it can run into a PG_BUSY page that it does not own is if 812 * some other process has started I/O on the page (e.g. either 813 * a pagein, or a pageout). if the PG_BUSY page is being paged 814 * in, then it can not be dirty (!PG_CLEAN) because no one has 815 * had a chance to modify it yet. if the PG_BUSY page is being 816 * paged out then it means that someone else has already started 817 * cleaning the page for us (how nice!). in this case, if we 818 * have syncio specified, then after we make our pass through the 819 * object we need to wait for the other PG_BUSY pages to clear 820 * off (i.e. we need to do an iosync). also note that once a 821 * page is PG_BUSY it must stay in its object until it is un-busyed. 822 * 823 * note on page traversal: 824 * we can traverse the pages in an object either by going down the 825 * linked list in "uobj->memq", or we can go over the address range 826 * by page doing hash table lookups for each address. depending 827 * on how many pages are in the object it may be cheaper to do one 828 * or the other. we set "by_list" to true if we are using memq. 829 * if the cost of a hash lookup was equal to the cost of the list 830 * traversal we could compare the number of pages in the start->stop 831 * range to the total number of pages in the object. however, it 832 * seems that a hash table lookup is more expensive than the linked 833 * list traversal, so we multiply the number of pages in the 834 * start->stop range by a penalty which we define below. 835 */ 836 837 #define UVN_HASH_PENALTY 4 /* XXX: a guess */ 838 839 static boolean_t 840 uvn_flush(uobj, start, stop, flags) 841 struct uvm_object *uobj; 842 vaddr_t start, stop; 843 int flags; 844 { 845 struct uvm_vnode *uvn = (struct uvm_vnode *) uobj; 846 struct vm_page *pp, *ppnext, *ptmp; 847 struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp; 848 int npages, result, lcv; 849 boolean_t retval, need_iosync, by_list, needs_clean; 850 vaddr_t curoff; 851 u_short pp_version; 852 UVMHIST_FUNC("uvn_flush"); UVMHIST_CALLED(maphist); 853 854 curoff = 0; /* XXX: shut up gcc */ 855 /* 856 * get init vals and determine how we are going to traverse object 857 */ 858 859 need_iosync = FALSE; 860 retval = TRUE; /* return value */ 861 if (flags & PGO_ALLPAGES) { 862 start = 0; 863 stop = round_page(uvn->u_size); 864 by_list = TRUE; /* always go by the list */ 865 } else { 866 start = trunc_page(start); 867 stop = round_page(stop); 868 if (stop > round_page(uvn->u_size)) 869 printf("uvn_flush: strange, got an out of range " 870 "flush (fixed)\n"); 871 872 by_list = (uobj->uo_npages <= 873 ((stop - start) >> PAGE_SHIFT) * UVN_HASH_PENALTY); 874 } 875 876 UVMHIST_LOG(maphist, 877 " flush start=0x%x, stop=0x%x, by_list=%d, flags=0x%x", 878 start, stop, by_list, flags); 879 880 /* 881 * PG_CLEANCHK: this bit is used by the pgo_mk_pcluster function as 882 * a _hint_ as to how up to date the PG_CLEAN bit is. if the hint 883 * is wrong it will only prevent us from clustering... it won't break 884 * anything. we clear all PG_CLEANCHK bits here, and pgo_mk_pcluster 885 * will set them as it syncs PG_CLEAN. This is only an issue if we 886 * are looking at non-inactive pages (because inactive page's PG_CLEAN 887 * bit is always up to date since there are no mappings). 888 * [borrowed PG_CLEANCHK idea from FreeBSD VM] 889 */ 890 891 if ((flags & PGO_CLEANIT) != 0 && 892 uobj->pgops->pgo_mk_pcluster != NULL) { 893 if (by_list) { 894 for (pp = uobj->memq.tqh_first ; pp != NULL ; 895 pp = pp->listq.tqe_next) { 896 if (pp->offset < start || pp->offset >= stop) 897 continue; 898 pp->flags &= ~PG_CLEANCHK; 899 } 900 901 } else { /* by hash */ 902 for (curoff = start ; curoff < stop; 903 curoff += PAGE_SIZE) { 904 pp = uvm_pagelookup(uobj, curoff); 905 if (pp) 906 pp->flags &= ~PG_CLEANCHK; 907 } 908 } 909 } 910 911 /* 912 * now do it. note: we must update ppnext in body of loop or we 913 * will get stuck. we need to use ppnext because we may free "pp" 914 * before doing the next loop. 915 */ 916 917 if (by_list) { 918 pp = uobj->memq.tqh_first; 919 } else { 920 curoff = start; 921 pp = uvm_pagelookup(uobj, curoff); 922 } 923 924 ppnext = NULL; /* XXX: shut up gcc */ 925 ppsp = NULL; /* XXX: shut up gcc */ 926 uvm_lock_pageq(); /* page queues locked */ 927 928 /* locked: both page queues and uobj */ 929 for ( ; (by_list && pp != NULL) || 930 (!by_list && curoff < stop) ; pp = ppnext) { 931 932 if (by_list) { 933 934 /* 935 * range check 936 */ 937 938 if (pp->offset < start || pp->offset >= stop) { 939 ppnext = pp->listq.tqe_next; 940 continue; 941 } 942 943 } else { 944 945 /* 946 * null check 947 */ 948 949 curoff += PAGE_SIZE; 950 if (pp == NULL) { 951 if (curoff < stop) 952 ppnext = uvm_pagelookup(uobj, curoff); 953 continue; 954 } 955 956 } 957 958 /* 959 * handle case where we do not need to clean page (either 960 * because we are not clean or because page is not dirty or 961 * is busy): 962 * 963 * NOTE: we are allowed to deactivate a non-wired active 964 * PG_BUSY page, but once a PG_BUSY page is on the inactive 965 * queue it must stay put until it is !PG_BUSY (so as not to 966 * confuse pagedaemon). 967 */ 968 969 if ((flags & PGO_CLEANIT) == 0 || (pp->flags & PG_BUSY) != 0) { 970 needs_clean = FALSE; 971 if ((pp->flags & PG_BUSY) != 0 && 972 (flags & (PGO_CLEANIT|PGO_SYNCIO)) == 973 (PGO_CLEANIT|PGO_SYNCIO)) 974 need_iosync = TRUE; 975 } else { 976 /* 977 * freeing: nuke all mappings so we can sync 978 * PG_CLEAN bit with no race 979 */ 980 if ((pp->flags & PG_CLEAN) != 0 && 981 (flags & PGO_FREE) != 0 && 982 (pp->pqflags & PQ_ACTIVE) != 0) 983 pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE); 984 if ((pp->flags & PG_CLEAN) != 0 && 985 pmap_is_modified(PMAP_PGARG(pp))) 986 pp->flags &= ~(PG_CLEAN); 987 pp->flags |= PG_CLEANCHK; /* update "hint" */ 988 989 needs_clean = ((pp->flags & PG_CLEAN) == 0); 990 } 991 992 /* 993 * if we don't need a clean... load ppnext and dispose of pp 994 */ 995 if (!needs_clean) { 996 /* load ppnext */ 997 if (by_list) 998 ppnext = pp->listq.tqe_next; 999 else { 1000 if (curoff < stop) 1001 ppnext = uvm_pagelookup(uobj, curoff); 1002 } 1003 1004 /* now dispose of pp */ 1005 if (flags & PGO_DEACTIVATE) { 1006 if ((pp->pqflags & PQ_INACTIVE) == 0 && 1007 pp->wire_count == 0) { 1008 pmap_page_protect(PMAP_PGARG(pp), 1009 VM_PROT_NONE); 1010 uvm_pagedeactivate(pp); 1011 } 1012 1013 } else if (flags & PGO_FREE) { 1014 if (pp->flags & PG_BUSY) { 1015 /* release busy pages */ 1016 pp->flags |= PG_RELEASED; 1017 } else { 1018 pmap_page_protect(PMAP_PGARG(pp), 1019 VM_PROT_NONE); 1020 /* removed page from object */ 1021 uvm_pagefree(pp); 1022 } 1023 } 1024 /* ppnext is valid so we can continue... */ 1025 continue; 1026 } 1027 1028 /* 1029 * pp points to a page in the locked object that we are 1030 * working on. if it is !PG_CLEAN,!PG_BUSY and we asked 1031 * for cleaning (PGO_CLEANIT). we clean it now. 1032 * 1033 * let uvm_pager_put attempted a clustered page out. 1034 * note: locked: uobj and page queues. 1035 */ 1036 1037 pp->flags |= PG_BUSY; /* we 'own' page now */ 1038 UVM_PAGE_OWN(pp, "uvn_flush"); 1039 pmap_page_protect(PMAP_PGARG(pp), VM_PROT_READ); 1040 pp_version = pp->version; 1041 ReTry: 1042 ppsp = pps; 1043 npages = sizeof(pps) / sizeof(struct vm_page *); 1044 1045 /* locked: page queues, uobj */ 1046 result = uvm_pager_put(uobj, pp, &ppsp, &npages, 1047 flags | PGO_DOACTCLUST, start, stop); 1048 /* unlocked: page queues, uobj */ 1049 1050 /* 1051 * at this point nothing is locked. if we did an async I/O 1052 * it is remotely possible for the async i/o to complete and 1053 * the page "pp" be freed or what not before we get a chance 1054 * to relock the object. in order to detect this, we have 1055 * saved the version number of the page in "pp_version". 1056 */ 1057 1058 /* relock! */ 1059 simple_lock(&uobj->vmobjlock); 1060 uvm_lock_pageq(); 1061 1062 /* 1063 * VM_PAGER_AGAIN: given the structure of this pager, this 1064 * can only happen when we are doing async I/O and can't 1065 * map the pages into kernel memory (pager_map) due to lack 1066 * of vm space. if this happens we drop back to sync I/O. 1067 */ 1068 1069 if (result == VM_PAGER_AGAIN) { 1070 /* 1071 * it is unlikely, but page could have been released 1072 * while we had the object lock dropped. we ignore 1073 * this now and retry the I/O. we will detect and 1074 * handle the released page after the syncio I/O 1075 * completes. 1076 */ 1077 #ifdef DIAGNOSTIC 1078 if (flags & PGO_SYNCIO) 1079 panic("uvn_flush: PGO_SYNCIO return 'try again' error (impossible)"); 1080 #endif 1081 flags |= PGO_SYNCIO; 1082 goto ReTry; 1083 } 1084 1085 /* 1086 * the cleaning operation is now done. finish up. note that 1087 * on error (!OK, !PEND) uvm_pager_put drops the cluster for us. 1088 * if success (OK, PEND) then uvm_pager_put returns the cluster 1089 * to us in ppsp/npages. 1090 */ 1091 1092 /* 1093 * for pending async i/o if we are not deactivating/freeing 1094 * we can move on to the next page. 1095 */ 1096 1097 if (result == VM_PAGER_PEND) { 1098 1099 if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) { 1100 /* 1101 * no per-page ops: refresh ppnext and continue 1102 */ 1103 if (by_list) { 1104 if (pp->version == pp_version) 1105 ppnext = pp->listq.tqe_next; 1106 else 1107 /* reset */ 1108 ppnext = uobj->memq.tqh_first; 1109 } else { 1110 if (curoff < stop) 1111 ppnext = uvm_pagelookup(uobj, 1112 curoff); 1113 } 1114 continue; 1115 } 1116 1117 /* need to do anything here? */ 1118 } 1119 1120 /* 1121 * need to look at each page of the I/O operation. we defer 1122 * processing "pp" until the last trip through this "for" loop 1123 * so that we can load "ppnext" for the main loop after we 1124 * play with the cluster pages [thus the "npages + 1" in the 1125 * loop below]. 1126 */ 1127 1128 for (lcv = 0 ; lcv < npages + 1 ; lcv++) { 1129 1130 /* 1131 * handle ppnext for outside loop, and saving pp 1132 * until the end. 1133 */ 1134 if (lcv < npages) { 1135 if (ppsp[lcv] == pp) 1136 continue; /* skip pp until the end */ 1137 ptmp = ppsp[lcv]; 1138 } else { 1139 ptmp = pp; 1140 1141 /* set up next page for outer loop */ 1142 if (by_list) { 1143 if (pp->version == pp_version) 1144 ppnext = pp->listq.tqe_next; 1145 else 1146 /* reset */ 1147 ppnext = uobj->memq.tqh_first; 1148 } else { 1149 if (curoff < stop) 1150 ppnext = uvm_pagelookup(uobj, curoff); 1151 } 1152 } 1153 1154 /* 1155 * verify the page didn't get moved while obj was 1156 * unlocked 1157 */ 1158 if (result == VM_PAGER_PEND && ptmp->uobject != uobj) 1159 continue; 1160 1161 /* 1162 * unbusy the page if I/O is done. note that for 1163 * pending I/O it is possible that the I/O op 1164 * finished before we relocked the object (in 1165 * which case the page is no longer busy). 1166 */ 1167 1168 if (result != VM_PAGER_PEND) { 1169 if (ptmp->flags & PG_WANTED) 1170 /* still holding object lock */ 1171 thread_wakeup(ptmp); 1172 1173 ptmp->flags &= ~(PG_WANTED|PG_BUSY); 1174 UVM_PAGE_OWN(ptmp, NULL); 1175 if (ptmp->flags & PG_RELEASED) { 1176 1177 /* pgo_releasepg wants this */ 1178 uvm_unlock_pageq(); 1179 if (!uvn_releasepg(ptmp, NULL)) 1180 return (TRUE); 1181 1182 uvm_lock_pageq(); /* relock */ 1183 continue; /* next page */ 1184 1185 } else { 1186 ptmp->flags |= (PG_CLEAN|PG_CLEANCHK); 1187 if ((flags & PGO_FREE) == 0) 1188 pmap_clear_modify( 1189 PMAP_PGARG(ptmp)); 1190 } 1191 } 1192 1193 /* 1194 * dispose of page 1195 */ 1196 1197 if (flags & PGO_DEACTIVATE) { 1198 if ((pp->pqflags & PQ_INACTIVE) == 0 && 1199 pp->wire_count == 0) { 1200 pmap_page_protect(PMAP_PGARG(ptmp), 1201 VM_PROT_NONE); 1202 uvm_pagedeactivate(ptmp); 1203 } 1204 1205 } else if (flags & PGO_FREE) { 1206 if (result == VM_PAGER_PEND) { 1207 if ((ptmp->flags & PG_BUSY) != 0) 1208 /* signal for i/o done */ 1209 ptmp->flags |= PG_RELEASED; 1210 } else { 1211 if (result != VM_PAGER_OK) { 1212 printf("uvn_flush: obj=%p, " 1213 "offset=0x%lx. error " 1214 "during pageout.\n", 1215 pp->uobject, pp->offset); 1216 printf("uvn_flush: WARNING: " 1217 "changes to page may be " 1218 "lost!\n"); 1219 retval = FALSE; 1220 } 1221 pmap_page_protect(PMAP_PGARG(ptmp), 1222 VM_PROT_NONE); 1223 uvm_pagefree(ptmp); 1224 } 1225 } 1226 1227 } /* end of "lcv" for loop */ 1228 1229 } /* end of "pp" for loop */ 1230 1231 /* 1232 * done with pagequeues: unlock 1233 */ 1234 uvm_unlock_pageq(); 1235 1236 /* 1237 * now wait for all I/O if required. 1238 */ 1239 if (need_iosync) { 1240 1241 UVMHIST_LOG(maphist," <<DOING IOSYNC>>",0,0,0,0); 1242 while (uvn->u_nio != 0) { 1243 uvn->u_flags |= UVM_VNODE_IOSYNC; 1244 UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock, 1245 FALSE, "uvn_flush",0); 1246 simple_lock(&uvn->u_obj.vmobjlock); 1247 } 1248 if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED) 1249 wakeup(&uvn->u_flags); 1250 uvn->u_flags &= ~(UVM_VNODE_IOSYNC|UVM_VNODE_IOSYNCWANTED); 1251 } 1252 1253 /* return, with object locked! */ 1254 UVMHIST_LOG(maphist,"<- done (retval=0x%x)",retval,0,0,0); 1255 return(retval); 1256 } 1257 1258 /* 1259 * uvn_cluster 1260 * 1261 * we are about to do I/O in an object at offset. this function is called 1262 * to establish a range of offsets around "offset" in which we can cluster 1263 * I/O. 1264 * 1265 * - currently doesn't matter if obj locked or not. 1266 */ 1267 1268 static void 1269 uvn_cluster(uobj, offset, loffset, hoffset) 1270 struct uvm_object *uobj; 1271 vaddr_t offset; 1272 vaddr_t *loffset, *hoffset; /* OUT */ 1273 { 1274 struct uvm_vnode *uvn = (struct uvm_vnode *) uobj; 1275 *loffset = offset; 1276 1277 if (*loffset >= uvn->u_size) 1278 panic("uvn_cluster: offset out of range"); 1279 1280 /* 1281 * XXX: old pager claims we could use VOP_BMAP to get maxcontig value. 1282 */ 1283 *hoffset = *loffset + MAXBSIZE; 1284 if (*hoffset > round_page(uvn->u_size)) /* past end? */ 1285 *hoffset = round_page(uvn->u_size); 1286 1287 return; 1288 } 1289 1290 /* 1291 * uvn_put: flush page data to backing store. 1292 * 1293 * => prefer map unlocked (not required) 1294 * => object must be locked! we will _unlock_ it before starting I/O. 1295 * => flags: PGO_SYNCIO -- use sync. I/O 1296 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed) 1297 * => XXX: currently we use VOP_READ/VOP_WRITE which are only sync. 1298 * [thus we never do async i/o! see iodone comment] 1299 */ 1300 1301 static int 1302 uvn_put(uobj, pps, npages, flags) 1303 struct uvm_object *uobj; 1304 struct vm_page **pps; 1305 int npages, flags; 1306 { 1307 int retval; 1308 1309 /* note: object locked */ 1310 retval = uvn_io((struct uvm_vnode*)uobj, pps, npages, flags, UIO_WRITE); 1311 /* note: object unlocked */ 1312 1313 return(retval); 1314 } 1315 1316 1317 /* 1318 * uvn_get: get pages (synchronously) from backing store 1319 * 1320 * => prefer map unlocked (not required) 1321 * => object must be locked! we will _unlock_ it before starting any I/O. 1322 * => flags: PGO_ALLPAGES: get all of the pages 1323 * PGO_LOCKED: fault data structures are locked 1324 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx] 1325 * => NOTE: caller must check for released pages!! 1326 */ 1327 1328 static int 1329 uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags) 1330 struct uvm_object *uobj; 1331 vaddr_t offset; 1332 struct vm_page **pps; /* IN/OUT */ 1333 int *npagesp; /* IN (OUT if PGO_LOCKED) */ 1334 int centeridx, advice, flags; 1335 vm_prot_t access_type; 1336 { 1337 vaddr_t current_offset; 1338 struct vm_page *ptmp; 1339 int lcv, result, gotpages; 1340 boolean_t done; 1341 UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(maphist); 1342 UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0); 1343 1344 /* 1345 * step 1: handled the case where fault data structures are locked. 1346 */ 1347 1348 if (flags & PGO_LOCKED) { 1349 1350 /* 1351 * gotpages is the current number of pages we've gotten (which 1352 * we pass back up to caller via *npagesp. 1353 */ 1354 1355 gotpages = 0; 1356 1357 /* 1358 * step 1a: get pages that are already resident. only do this 1359 * if the data structures are locked (i.e. the first time 1360 * through). 1361 */ 1362 1363 done = TRUE; /* be optimistic */ 1364 1365 for (lcv = 0, current_offset = offset ; lcv < *npagesp ; 1366 lcv++, current_offset += PAGE_SIZE) { 1367 1368 /* do we care about this page? if not, skip it */ 1369 if (pps[lcv] == PGO_DONTCARE) 1370 continue; 1371 1372 /* lookup page */ 1373 ptmp = uvm_pagelookup(uobj, current_offset); 1374 1375 /* to be useful must get a non-busy, non-released pg */ 1376 if (ptmp == NULL || 1377 (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) { 1378 if (lcv == centeridx || (flags & PGO_ALLPAGES) 1379 != 0) 1380 done = FALSE; /* need to do a wait or I/O! */ 1381 continue; 1382 } 1383 1384 /* 1385 * useful page: busy/lock it and plug it in our 1386 * result array 1387 */ 1388 ptmp->flags |= PG_BUSY; /* loan up to caller */ 1389 UVM_PAGE_OWN(ptmp, "uvn_get1"); 1390 pps[lcv] = ptmp; 1391 gotpages++; 1392 1393 } /* "for" lcv loop */ 1394 1395 /* 1396 * XXX: given the "advice", should we consider async read-ahead? 1397 * XXX: fault current does deactive of pages behind us. is 1398 * this good (other callers might now). 1399 */ 1400 /* 1401 * XXX: read-ahead currently handled by buffer cache (bread) 1402 * level. 1403 * XXX: no async i/o available. 1404 * XXX: so we don't do anything now. 1405 */ 1406 1407 /* 1408 * step 1c: now we've either done everything needed or we to 1409 * unlock and do some waiting or I/O. 1410 */ 1411 1412 *npagesp = gotpages; /* let caller know */ 1413 if (done) 1414 return(VM_PAGER_OK); /* bingo! */ 1415 else 1416 /* EEK! Need to unlock and I/O */ 1417 return(VM_PAGER_UNLOCK); 1418 } 1419 1420 /* 1421 * step 2: get non-resident or busy pages. 1422 * object is locked. data structures are unlocked. 1423 * 1424 * XXX: because we can't do async I/O at this level we get things 1425 * page at a time (otherwise we'd chunk). the VOP_READ() will do 1426 * async-read-ahead for us at a lower level. 1427 */ 1428 1429 for (lcv = 0, current_offset = offset ; 1430 lcv < *npagesp ; lcv++, current_offset += PAGE_SIZE) { 1431 1432 /* skip over pages we've already gotten or don't want */ 1433 /* skip over pages we don't _have_ to get */ 1434 if (pps[lcv] != NULL || (lcv != centeridx && 1435 (flags & PGO_ALLPAGES) == 0)) 1436 continue; 1437 1438 /* 1439 * we have yet to locate the current page (pps[lcv]). we first 1440 * look for a page that is already at the current offset. if 1441 * we fine a page, we check to see if it is busy or released. 1442 * if that is the case, then we sleep on the page until it is 1443 * no longer busy or released and repeat the lookup. if the 1444 * page we found is neither busy nor released, then we busy it 1445 * (so we own it) and plug it into pps[lcv]. this breaks the 1446 * following while loop and indicates we are ready to move on 1447 * to the next page in the "lcv" loop above. 1448 * 1449 * if we exit the while loop with pps[lcv] still set to NULL, 1450 * then it means that we allocated a new busy/fake/clean page 1451 * ptmp in the object and we need to do I/O to fill in the data. 1452 */ 1453 1454 while (pps[lcv] == NULL) { /* top of "pps" while loop */ 1455 1456 /* look for a current page */ 1457 ptmp = uvm_pagelookup(uobj, current_offset); 1458 1459 /* nope? allocate one now (if we can) */ 1460 if (ptmp == NULL) { 1461 1462 ptmp = uvm_pagealloc(uobj, current_offset, 1463 NULL); /* alloc */ 1464 1465 /* out of RAM? */ 1466 if (ptmp == NULL) { 1467 simple_unlock(&uobj->vmobjlock); 1468 uvm_wait("uvn_getpage"); 1469 simple_lock(&uobj->vmobjlock); 1470 1471 /* goto top of pps while loop */ 1472 continue; 1473 } 1474 1475 /* 1476 * got new page ready for I/O. break pps 1477 * while loop. pps[lcv] is still NULL. 1478 */ 1479 break; 1480 } 1481 1482 /* page is there, see if we need to wait on it */ 1483 if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) { 1484 ptmp->flags |= PG_WANTED; 1485 UVM_UNLOCK_AND_WAIT(ptmp, 1486 &uobj->vmobjlock, 0, "uvn_get",0); 1487 simple_lock(&uobj->vmobjlock); 1488 continue; /* goto top of pps while loop */ 1489 } 1490 1491 /* 1492 * if we get here then the page has become resident 1493 * and unbusy between steps 1 and 2. we busy it 1494 * now (so we own it) and set pps[lcv] (so that we 1495 * exit the while loop). 1496 */ 1497 ptmp->flags |= PG_BUSY; 1498 UVM_PAGE_OWN(ptmp, "uvn_get2"); 1499 pps[lcv] = ptmp; 1500 } 1501 1502 /* 1503 * if we own the a valid page at the correct offset, pps[lcv] 1504 * will point to it. nothing more to do except go to the 1505 * next page. 1506 */ 1507 1508 if (pps[lcv]) 1509 continue; /* next lcv */ 1510 1511 /* 1512 * we have a "fake/busy/clean" page that we just allocated. do 1513 * I/O to fill it with valid data. note that object must be 1514 * locked going into uvn_io, but will be unlocked afterwards. 1515 */ 1516 1517 result = uvn_io((struct uvm_vnode *) uobj, &ptmp, 1, 1518 PGO_SYNCIO, UIO_READ); 1519 1520 /* 1521 * I/O done. object is unlocked (by uvn_io). because we used 1522 * syncio the result can not be PEND or AGAIN. we must relock 1523 * and check for errors. 1524 */ 1525 1526 /* lock object. check for errors. */ 1527 simple_lock(&uobj->vmobjlock); 1528 if (result != VM_PAGER_OK) { 1529 if (ptmp->flags & PG_WANTED) 1530 /* object lock still held */ 1531 thread_wakeup(ptmp); 1532 1533 ptmp->flags &= ~(PG_WANTED|PG_BUSY); 1534 UVM_PAGE_OWN(ptmp, NULL); 1535 uvm_lock_pageq(); 1536 uvm_pagefree(ptmp); 1537 uvm_unlock_pageq(); 1538 simple_unlock(&uobj->vmobjlock); 1539 return(result); 1540 } 1541 1542 /* 1543 * we got the page! clear the fake flag (indicates valid 1544 * data now in page) and plug into our result array. note 1545 * that page is still busy. 1546 * 1547 * it is the callers job to: 1548 * => check if the page is released 1549 * => unbusy the page 1550 * => activate the page 1551 */ 1552 1553 ptmp->flags &= ~PG_FAKE; /* data is valid ... */ 1554 pmap_clear_modify(PMAP_PGARG(ptmp)); /* ... and clean */ 1555 pps[lcv] = ptmp; 1556 1557 } /* lcv loop */ 1558 1559 /* 1560 * finally, unlock object and return. 1561 */ 1562 1563 simple_unlock(&uobj->vmobjlock); 1564 return (VM_PAGER_OK); 1565 } 1566 1567 /* 1568 * uvn_asyncget: start async I/O to bring pages into ram 1569 * 1570 * => caller must lock object(???XXX: see if this is best) 1571 * => could be called from uvn_get or a madvise() fault-ahead. 1572 * => if it fails, it doesn't matter. 1573 */ 1574 1575 static int 1576 uvn_asyncget(uobj, offset, npages) 1577 struct uvm_object *uobj; 1578 vaddr_t offset; 1579 int npages; 1580 { 1581 1582 /* 1583 * XXXCDC: we can't do async I/O yet 1584 */ 1585 printf("uvn_asyncget called\n"); 1586 return (KERN_SUCCESS); 1587 } 1588 1589 /* 1590 * uvn_io: do I/O to a vnode 1591 * 1592 * => prefer map unlocked (not required) 1593 * => object must be locked! we will _unlock_ it before starting I/O. 1594 * => flags: PGO_SYNCIO -- use sync. I/O 1595 * => XXX: currently we use VOP_READ/VOP_WRITE which are only sync. 1596 * [thus we never do async i/o! see iodone comment] 1597 */ 1598 1599 static int 1600 uvn_io(uvn, pps, npages, flags, rw) 1601 struct uvm_vnode *uvn; 1602 vm_page_t *pps; 1603 int npages, flags, rw; 1604 { 1605 struct vnode *vn; 1606 struct uio uio; 1607 struct iovec iov; 1608 vaddr_t kva, file_offset; 1609 int waitf, result, got, wanted; 1610 UVMHIST_FUNC("uvn_io"); UVMHIST_CALLED(maphist); 1611 1612 UVMHIST_LOG(maphist, "rw=%d", rw,0,0,0); 1613 1614 /* 1615 * init values 1616 */ 1617 1618 waitf = (flags & PGO_SYNCIO) ? M_WAITOK : M_NOWAIT; 1619 vn = (struct vnode *) uvn; 1620 file_offset = pps[0]->offset; 1621 1622 /* 1623 * check for sync'ing I/O. 1624 */ 1625 1626 while (uvn->u_flags & UVM_VNODE_IOSYNC) { 1627 if (waitf == M_NOWAIT) { 1628 simple_unlock(&uvn->u_obj.vmobjlock); 1629 UVMHIST_LOG(maphist,"<- try again (iosync)",0,0,0,0); 1630 return(VM_PAGER_AGAIN); 1631 } 1632 uvn->u_flags |= UVM_VNODE_IOSYNCWANTED; 1633 UVM_UNLOCK_AND_WAIT(&uvn->u_flags, &uvn->u_obj.vmobjlock, 1634 FALSE, "uvn_iosync",0); 1635 simple_lock(&uvn->u_obj.vmobjlock); 1636 } 1637 1638 /* 1639 * check size 1640 */ 1641 1642 if (file_offset >= uvn->u_size) { 1643 simple_unlock(&uvn->u_obj.vmobjlock); 1644 UVMHIST_LOG(maphist,"<- BAD (size check)",0,0,0,0); 1645 #ifdef DIAGNOSTIC 1646 printf("uvn_io: note: size check fired\n"); 1647 #endif 1648 return(VM_PAGER_BAD); 1649 } 1650 1651 /* 1652 * first try and map the pages in (without waiting) 1653 */ 1654 1655 kva = uvm_pagermapin(pps, npages, NULL, M_NOWAIT); 1656 if (kva == NULL && waitf == M_NOWAIT) { 1657 simple_unlock(&uvn->u_obj.vmobjlock); 1658 UVMHIST_LOG(maphist,"<- mapin failed (try again)",0,0,0,0); 1659 return(VM_PAGER_AGAIN); 1660 } 1661 1662 /* 1663 * ok, now bump u_nio up. at this point we are done with uvn 1664 * and can unlock it. if we still don't have a kva, try again 1665 * (this time with sleep ok). 1666 */ 1667 1668 uvn->u_nio++; /* we have an I/O in progress! */ 1669 simple_unlock(&uvn->u_obj.vmobjlock); 1670 /* NOTE: object now unlocked */ 1671 if (kva == NULL) { 1672 kva = uvm_pagermapin(pps, npages, NULL, M_WAITOK); 1673 } 1674 1675 /* 1676 * ok, mapped in. our pages are PG_BUSY so they are not going to 1677 * get touched (so we can look at "offset" without having to lock 1678 * the object). set up for I/O. 1679 */ 1680 1681 /* 1682 * fill out uio/iov 1683 */ 1684 1685 iov.iov_base = (caddr_t) kva; 1686 wanted = npages << PAGE_SHIFT; 1687 if (file_offset + wanted > uvn->u_size) 1688 wanted = uvn->u_size - file_offset; /* XXX: needed? */ 1689 iov.iov_len = wanted; 1690 uio.uio_iov = &iov; 1691 uio.uio_iovcnt = 1; 1692 uio.uio_offset = file_offset; 1693 uio.uio_segflg = UIO_SYSSPACE; 1694 uio.uio_rw = rw; 1695 uio.uio_resid = wanted; 1696 uio.uio_procp = NULL; 1697 1698 /* 1699 * do the I/O! (XXX: curproc?) 1700 */ 1701 1702 UVMHIST_LOG(maphist, "calling VOP",0,0,0,0); 1703 1704 if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0) 1705 vn_lock(vn, LK_EXCLUSIVE | LK_RETRY); 1706 /* NOTE: vnode now locked! */ 1707 1708 if (rw == UIO_READ) 1709 result = VOP_READ(vn, &uio, 0, curproc->p_ucred); 1710 else 1711 result = VOP_WRITE(vn, &uio, 0, curproc->p_ucred); 1712 1713 if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0) 1714 VOP_UNLOCK(vn, 0); 1715 /* NOTE: vnode now unlocked (unless vnislocked) */ 1716 1717 UVMHIST_LOG(maphist, "done calling VOP",0,0,0,0); 1718 1719 /* 1720 * result == unix style errno (0 == OK!) 1721 * 1722 * zero out rest of buffer (if needed) 1723 */ 1724 1725 if (result == 0) { 1726 got = wanted - uio.uio_resid; 1727 1728 if (wanted && got == 0) { 1729 result = EIO; /* XXX: error? */ 1730 } else if (got < PAGE_SIZE * npages && rw == UIO_READ) { 1731 memset((void *) (kva + got), 0, 1732 (npages << PAGE_SHIFT) - got); 1733 } 1734 } 1735 1736 /* 1737 * now remove pager mapping 1738 */ 1739 uvm_pagermapout(kva, npages); 1740 1741 /* 1742 * now clean up the object (i.e. drop I/O count) 1743 */ 1744 1745 simple_lock(&uvn->u_obj.vmobjlock); 1746 /* NOTE: object now locked! */ 1747 1748 uvn->u_nio--; /* I/O DONE! */ 1749 if ((uvn->u_flags & UVM_VNODE_IOSYNC) != 0 && uvn->u_nio == 0) { 1750 wakeup(&uvn->u_nio); 1751 } 1752 simple_unlock(&uvn->u_obj.vmobjlock); 1753 /* NOTE: object now unlocked! */ 1754 1755 /* 1756 * done! 1757 */ 1758 1759 UVMHIST_LOG(maphist, "<- done (result %d)", result,0,0,0); 1760 if (result == 0) 1761 return(VM_PAGER_OK); 1762 else 1763 return(VM_PAGER_ERROR); 1764 } 1765 1766 /* 1767 * uvm_vnp_uncache: disable "persisting" in a vnode... when last reference 1768 * is gone we will kill the object (flushing dirty pages back to the vnode 1769 * if needed). 1770 * 1771 * => returns TRUE if there was no uvm_object attached or if there was 1772 * one and we killed it [i.e. if there is no active uvn] 1773 * => called with the vnode VOP_LOCK'd [we will unlock it for I/O, if 1774 * needed] 1775 * 1776 * => XXX: given that we now kill uvn's when a vnode is recycled (without 1777 * having to hold a reference on the vnode) and given a working 1778 * uvm_vnp_sync(), how does that effect the need for this function? 1779 * [XXXCDC: seems like it can die?] 1780 * 1781 * => XXX: this function should DIE once we merge the VM and buffer 1782 * cache. 1783 * 1784 * research shows that this is called in the following places: 1785 * ext2fs_truncate, ffs_truncate, detrunc[msdosfs]: called when vnode 1786 * changes sizes 1787 * ext2fs_write, WRITE [ufs_readwrite], msdosfs_write: called when we 1788 * are written to 1789 * ex2fs_chmod, ufs_chmod: called if VTEXT vnode and the sticky bit 1790 * is off 1791 * ffs_realloccg: when we can't extend the current block and have 1792 * to allocate a new one we call this [XXX: why?] 1793 * nfsrv_rename, rename_files: called when the target filename is there 1794 * and we want to remove it 1795 * nfsrv_remove, sys_unlink: called on file we are removing 1796 * nfsrv_access: if VTEXT and we want WRITE access and we don't uncache 1797 * then return "text busy" 1798 * nfs_open: seems to uncache any file opened with nfs 1799 * vn_writechk: if VTEXT vnode and can't uncache return "text busy" 1800 */ 1801 1802 boolean_t 1803 uvm_vnp_uncache(vp) 1804 struct vnode *vp; 1805 { 1806 struct uvm_vnode *uvn = &vp->v_uvm; 1807 1808 /* 1809 * lock uvn part of the vnode and check to see if we need to do anything 1810 */ 1811 1812 simple_lock(&uvn->u_obj.vmobjlock); 1813 if ((uvn->u_flags & UVM_VNODE_VALID) == 0 || 1814 (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) { 1815 simple_unlock(&uvn->u_obj.vmobjlock); 1816 return(TRUE); 1817 } 1818 1819 /* 1820 * we have a valid, non-blocked uvn. clear persist flag. 1821 * if uvn is currently active we can return now. 1822 */ 1823 1824 uvn->u_flags &= ~UVM_VNODE_CANPERSIST; 1825 if (uvn->u_obj.uo_refs) { 1826 simple_unlock(&uvn->u_obj.vmobjlock); 1827 return(FALSE); 1828 } 1829 1830 /* 1831 * uvn is currently persisting! we have to gain a reference to 1832 * it so that we can call uvn_detach to kill the uvn. 1833 */ 1834 1835 VREF(vp); /* seems ok, even with VOP_LOCK */ 1836 uvn->u_obj.uo_refs++; /* value is now 1 */ 1837 simple_unlock(&uvn->u_obj.vmobjlock); 1838 1839 1840 #ifdef DEBUG 1841 /* 1842 * carry over sanity check from old vnode pager: the vnode should 1843 * be VOP_LOCK'd, and we confirm it here. 1844 */ 1845 if (!VOP_ISLOCKED(vp)) { 1846 boolean_t is_ok_anyway = FALSE; 1847 #ifdef NFS 1848 extern int (**nfsv2_vnodeop_p) __P((void *)); 1849 extern int (**spec_nfsv2nodeop_p) __P((void *)); 1850 extern int (**fifo_nfsv2nodeop_p) __P((void *)); 1851 1852 /* vnode is NOT VOP_LOCKed: some vnode types _never_ lock */ 1853 if (vp->v_op == nfsv2_vnodeop_p || 1854 vp->v_op == spec_nfsv2nodeop_p) { 1855 is_ok_anyway = TRUE; 1856 } 1857 if (vp->v_op == fifo_nfsv2nodeop_p) { 1858 is_ok_anyway = TRUE; 1859 } 1860 #endif /* NFS */ 1861 if (!is_ok_anyway) 1862 panic("uvm_vnp_uncache: vnode not locked!"); 1863 } 1864 #endif /* DEBUG */ 1865 1866 /* 1867 * now drop our reference to the vnode. if we have the sole 1868 * reference to the vnode then this will cause it to die [as we 1869 * just cleared the persist flag]. we have to unlock the vnode 1870 * while we are doing this as it may trigger I/O. 1871 * 1872 * XXX: it might be possible for uvn to get reclaimed while we are 1873 * unlocked causing us to return TRUE when we should not. we ignore 1874 * this as a false-positive return value doesn't hurt us. 1875 */ 1876 VOP_UNLOCK(vp, 0); 1877 uvn_detach(&uvn->u_obj); 1878 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); 1879 1880 /* 1881 * and return... 1882 */ 1883 1884 return(TRUE); 1885 } 1886 1887 /* 1888 * uvm_vnp_setsize: grow or shrink a vnode uvn 1889 * 1890 * grow => just update size value 1891 * shrink => toss un-needed pages 1892 * 1893 * => we assume that the caller has a reference of some sort to the 1894 * vnode in question so that it will not be yanked out from under 1895 * us. 1896 * 1897 * called from: 1898 * => truncate fns (ext2fs_truncate, ffs_truncate, detrunc[msdos]) 1899 * => "write" fns (ext2fs_write, WRITE [ufs/ufs], msdosfs_write, nfs_write) 1900 * => ffs_balloc [XXX: why? doesn't WRITE handle?] 1901 * => NFS: nfs_loadattrcache, nfs_getattrcache, nfs_setattr 1902 * => union fs: union_newsize 1903 */ 1904 1905 void 1906 uvm_vnp_setsize(vp, newsize) 1907 struct vnode *vp; 1908 u_quad_t newsize; 1909 { 1910 struct uvm_vnode *uvn = &vp->v_uvm; 1911 1912 /* 1913 * lock uvn and check for valid object, and if valid: do it! 1914 */ 1915 simple_lock(&uvn->u_obj.vmobjlock); 1916 if (uvn->u_flags & UVM_VNODE_VALID) { 1917 1918 /* 1919 * make sure that the newsize fits within a vaddr_t 1920 * XXX: need to revise addressing data types 1921 */ 1922 1923 if (newsize > (vaddr_t) -PAGE_SIZE) { 1924 #ifdef DEBUG 1925 printf("uvm_vnp_setsize: vn %p size truncated " 1926 "%qx->%lx\n", vp, (long long)newsize, 1927 (vaddr_t)-PAGE_SIZE); 1928 #endif 1929 newsize = (vaddr_t)-PAGE_SIZE; 1930 } 1931 1932 /* 1933 * now check if the size has changed: if we shrink we had better 1934 * toss some pages... 1935 */ 1936 1937 if (uvn->u_size > newsize) { 1938 (void)uvn_flush(&uvn->u_obj, (vaddr_t) newsize, 1939 uvn->u_size, PGO_FREE); 1940 } 1941 uvn->u_size = (vaddr_t)newsize; 1942 } 1943 simple_unlock(&uvn->u_obj.vmobjlock); 1944 1945 /* 1946 * done 1947 */ 1948 return; 1949 } 1950 1951 /* 1952 * uvm_vnp_sync: flush all dirty VM pages back to their backing vnodes. 1953 * 1954 * => called from sys_sync with no VM structures locked 1955 * => only one process can do a sync at a time (because the uvn 1956 * structure only has one queue for sync'ing). we ensure this 1957 * by holding the uvn_sync_lock while the sync is in progress. 1958 * other processes attempting a sync will sleep on this lock 1959 * until we are done. 1960 */ 1961 1962 void 1963 uvm_vnp_sync(mp) 1964 struct mount *mp; 1965 { 1966 struct uvm_vnode *uvn; 1967 struct vnode *vp; 1968 boolean_t got_lock; 1969 1970 /* 1971 * step 1: ensure we are only ones using the uvn_sync_q by locking 1972 * our lock... 1973 */ 1974 lockmgr(&uvn_sync_lock, LK_EXCLUSIVE, (void *)0); 1975 1976 /* 1977 * step 2: build up a simpleq of uvns of interest based on the 1978 * write list. we gain a reference to uvns of interest. must 1979 * be careful about locking uvn's since we will be holding uvn_wl_lock 1980 * in the body of the loop. 1981 */ 1982 SIMPLEQ_INIT(&uvn_sync_q); 1983 simple_lock(&uvn_wl_lock); 1984 for (uvn = uvn_wlist.lh_first ; uvn != NULL ; 1985 uvn = uvn->u_wlist.le_next) { 1986 1987 vp = (struct vnode *) uvn; 1988 if (mp && vp->v_mount != mp) 1989 continue; 1990 1991 /* attempt to gain reference */ 1992 while ((got_lock = simple_lock_try(&uvn->u_obj.vmobjlock)) == 1993 FALSE && 1994 (uvn->u_flags & UVM_VNODE_BLOCKED) == 0) 1995 /* spin */ ; 1996 1997 /* 1998 * we will exit the loop if either if the following are true: 1999 * - we got the lock [always true if NCPU == 1] 2000 * - we failed to get the lock but noticed the vnode was 2001 * "blocked" -- in this case the vnode must be a dying 2002 * vnode, and since dying vnodes are in the process of 2003 * being flushed out, we can safely skip this one 2004 * 2005 * we want to skip over the vnode if we did not get the lock, 2006 * or if the vnode is already dying (due to the above logic). 2007 * 2008 * note that uvn must already be valid because we found it on 2009 * the wlist (this also means it can't be ALOCK'd). 2010 */ 2011 if (!got_lock || (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) { 2012 if (got_lock) 2013 simple_unlock(&uvn->u_obj.vmobjlock); 2014 continue; /* skip it */ 2015 } 2016 2017 /* 2018 * gain reference. watch out for persisting uvns (need to 2019 * regain vnode REF). 2020 */ 2021 if (uvn->u_obj.uo_refs == 0) 2022 VREF(vp); 2023 uvn->u_obj.uo_refs++; 2024 simple_unlock(&uvn->u_obj.vmobjlock); 2025 2026 /* 2027 * got it! 2028 */ 2029 SIMPLEQ_INSERT_HEAD(&uvn_sync_q, uvn, u_syncq); 2030 } 2031 simple_unlock(&uvn_wl_lock); 2032 2033 /* 2034 * step 3: we now have a list of uvn's that may need cleaning. 2035 * we are holding the uvn_sync_lock, but have dropped the uvn_wl_lock 2036 * (so we can now safely lock uvn's again). 2037 */ 2038 2039 for (uvn = uvn_sync_q.sqh_first ; uvn ; uvn = uvn->u_syncq.sqe_next) { 2040 simple_lock(&uvn->u_obj.vmobjlock); 2041 #ifdef DIAGNOSTIC 2042 if (uvn->u_flags & UVM_VNODE_DYING) { 2043 printf("uvm_vnp_sync: dying vnode on sync list\n"); 2044 } 2045 #endif 2046 uvn_flush(&uvn->u_obj, 0, 0, 2047 PGO_CLEANIT|PGO_ALLPAGES|PGO_DOACTCLUST); 2048 2049 /* 2050 * if we have the only reference and we just cleaned the uvn, 2051 * then we can pull it out of the UVM_VNODE_WRITEABLE state 2052 * thus allowing us to avoid thinking about flushing it again 2053 * on later sync ops. 2054 */ 2055 if (uvn->u_obj.uo_refs == 1 && 2056 (uvn->u_flags & UVM_VNODE_WRITEABLE)) { 2057 LIST_REMOVE(uvn, u_wlist); 2058 uvn->u_flags &= ~UVM_VNODE_WRITEABLE; 2059 } 2060 2061 simple_unlock(&uvn->u_obj.vmobjlock); 2062 2063 /* now drop our reference to the uvn */ 2064 uvn_detach(&uvn->u_obj); 2065 } 2066 2067 /* 2068 * done! release sync lock 2069 */ 2070 lockmgr(&uvn_sync_lock, LK_RELEASE, (void *)0); 2071 } 2072