1 /* $OpenBSD: uvm_vnode.c,v 1.2 1999/02/26 05:32:08 art Exp $ */ 2 /* $NetBSD: uvm_vnode.c,v 1.18 1999/01/29 12:56:17 bouyer Exp $ */ 3 4 /* 5 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE! 6 * >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<< 7 */ 8 /* 9 * Copyright (c) 1997 Charles D. Cranor and Washington University. 10 * Copyright (c) 1991, 1993 11 * The Regents of the University of California. 12 * Copyright (c) 1990 University of Utah. 13 * 14 * All rights reserved. 15 * 16 * This code is derived from software contributed to Berkeley by 17 * the Systems Programming Group of the University of Utah Computer 18 * Science Department. 19 * 20 * Redistribution and use in source and binary forms, with or without 21 * modification, are permitted provided that the following conditions 22 * are met: 23 * 1. Redistributions of source code must retain the above copyright 24 * notice, this list of conditions and the following disclaimer. 25 * 2. Redistributions in binary form must reproduce the above copyright 26 * notice, this list of conditions and the following disclaimer in the 27 * documentation and/or other materials provided with the distribution. 28 * 3. All advertising materials mentioning features or use of this software 29 * must display the following acknowledgement: 30 * This product includes software developed by Charles D. Cranor, 31 * Washington University, the University of California, Berkeley and 32 * its contributors. 33 * 4. Neither the name of the University nor the names of its contributors 34 * may be used to endorse or promote products derived from this software 35 * without specific prior written permission. 36 * 37 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 38 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 39 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 40 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 41 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 42 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 43 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 44 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 45 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 46 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 47 * SUCH DAMAGE. 48 * 49 * @(#)vnode_pager.c 8.8 (Berkeley) 2/13/94 50 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp 51 */ 52 53 /* 54 * uvm_vnode.c: the vnode pager. 55 */ 56 57 #include <sys/param.h> 58 #include <sys/systm.h> 59 #include <sys/proc.h> 60 #include <sys/malloc.h> 61 #include <sys/vnode.h> 62 #include <sys/disklabel.h> 63 #include <sys/ioctl.h> 64 #include <sys/fcntl.h> 65 #include <sys/conf.h> 66 67 #include <miscfs/specfs/specdev.h> 68 69 #include <vm/vm.h> 70 #include <vm/vm_page.h> 71 #include <vm/vm_kern.h> 72 73 #include <uvm/uvm.h> 74 #include <uvm/uvm_vnode.h> 75 76 /* 77 * private global data structure 78 * 79 * we keep a list of writeable active vnode-backed VM objects for sync op. 80 * we keep a simpleq of vnodes that are currently being sync'd. 81 */ 82 83 LIST_HEAD(uvn_list_struct, uvm_vnode); 84 static struct uvn_list_struct uvn_wlist; /* writeable uvns */ 85 static simple_lock_data_t uvn_wl_lock; /* locks uvn_wlist */ 86 87 SIMPLEQ_HEAD(uvn_sq_struct, uvm_vnode); 88 static struct uvn_sq_struct uvn_sync_q; /* sync'ing uvns */ 89 lock_data_t uvn_sync_lock; /* locks sync operation */ 90 91 /* 92 * functions 93 */ 94 95 static int uvn_asyncget __P((struct uvm_object *, vaddr_t, 96 int)); 97 struct uvm_object *uvn_attach __P((void *, vm_prot_t)); 98 static void uvn_cluster __P((struct uvm_object *, vaddr_t, 99 vaddr_t *, vaddr_t *)); 100 static void uvn_detach __P((struct uvm_object *)); 101 static boolean_t uvn_flush __P((struct uvm_object *, vaddr_t, 102 vaddr_t, int)); 103 static int uvn_get __P((struct uvm_object *, vaddr_t, 104 vm_page_t *, int *, int, 105 vm_prot_t, int, int)); 106 static void uvn_init __P((void)); 107 static int uvn_io __P((struct uvm_vnode *, vm_page_t *, 108 int, int, int)); 109 static int uvn_put __P((struct uvm_object *, vm_page_t *, 110 int, boolean_t)); 111 static void uvn_reference __P((struct uvm_object *)); 112 static boolean_t uvn_releasepg __P((struct vm_page *, 113 struct vm_page **)); 114 115 /* 116 * master pager structure 117 */ 118 119 struct uvm_pagerops uvm_vnodeops = { 120 uvn_init, 121 uvn_attach, 122 uvn_reference, 123 uvn_detach, 124 NULL, /* no specialized fault routine required */ 125 uvn_flush, 126 uvn_get, 127 uvn_asyncget, 128 uvn_put, 129 uvn_cluster, 130 uvm_mk_pcluster, /* use generic version of this: see uvm_pager.c */ 131 uvm_shareprot, /* !NULL: allow us in share maps */ 132 NULL, /* AIO-DONE function (not until we have asyncio) */ 133 uvn_releasepg, 134 }; 135 136 /* 137 * the ops! 138 */ 139 140 /* 141 * uvn_init 142 * 143 * init pager private data structures. 144 */ 145 146 static void 147 uvn_init() 148 { 149 150 LIST_INIT(&uvn_wlist); 151 simple_lock_init(&uvn_wl_lock); 152 /* note: uvn_sync_q init'd in uvm_vnp_sync() */ 153 lockinit(&uvn_sync_lock, PVM, "uvnsync", 0, 0); 154 } 155 156 /* 157 * uvn_attach 158 * 159 * attach a vnode structure to a VM object. if the vnode is already 160 * attached, then just bump the reference count by one and return the 161 * VM object. if not already attached, attach and return the new VM obj. 162 * the "accessprot" tells the max access the attaching thread wants to 163 * our pages. 164 * 165 * => caller must _not_ already be holding the lock on the uvm_object. 166 * => in fact, nothing should be locked so that we can sleep here. 167 * => note that uvm_object is first thing in vnode structure, so their 168 * pointers are equiv. 169 */ 170 171 struct uvm_object * 172 uvn_attach(arg, accessprot) 173 void *arg; 174 vm_prot_t accessprot; 175 { 176 struct vnode *vp = arg; 177 struct uvm_vnode *uvn = &vp->v_uvm; 178 struct vattr vattr; 179 int oldflags, result; 180 struct partinfo pi; 181 u_quad_t used_vnode_size; 182 UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist); 183 184 UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0); 185 186 used_vnode_size = (u_quad_t)0; /* XXX gcc -Wuninitialized */ 187 188 /* 189 * first get a lock on the uvn. 190 */ 191 simple_lock(&uvn->u_obj.vmobjlock); 192 while (uvn->u_flags & UVM_VNODE_BLOCKED) { 193 uvn->u_flags |= UVM_VNODE_WANTED; 194 UVMHIST_LOG(maphist, " SLEEPING on blocked vn",0,0,0,0); 195 UVM_UNLOCK_AND_WAIT(uvn, &uvn->u_obj.vmobjlock, FALSE, 196 "uvn_attach", 0); 197 simple_lock(&uvn->u_obj.vmobjlock); 198 UVMHIST_LOG(maphist," WOKE UP",0,0,0,0); 199 } 200 201 /* 202 * if we're mapping a BLK device, make sure it is a disk. 203 */ 204 if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) { 205 simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */ 206 UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 0,0,0,0); 207 return(NULL); 208 } 209 210 /* 211 * now we have lock and uvn must not be in a blocked state. 212 * first check to see if it is already active, in which case 213 * we can bump the reference count, check to see if we need to 214 * add it to the writeable list, and then return. 215 */ 216 if (uvn->u_flags & UVM_VNODE_VALID) { /* already active? */ 217 218 /* regain VREF if we were persisting */ 219 if (uvn->u_obj.uo_refs == 0) { 220 VREF(vp); 221 UVMHIST_LOG(maphist," VREF (reclaim persisting vnode)", 222 0,0,0,0); 223 } 224 uvn->u_obj.uo_refs++; /* bump uvn ref! */ 225 226 /* check for new writeable uvn */ 227 if ((accessprot & VM_PROT_WRITE) != 0 && 228 (uvn->u_flags & UVM_VNODE_WRITEABLE) == 0) { 229 simple_lock(&uvn_wl_lock); 230 LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist); 231 simple_unlock(&uvn_wl_lock); 232 /* we are now on wlist! */ 233 uvn->u_flags |= UVM_VNODE_WRITEABLE; 234 } 235 236 /* unlock and return */ 237 simple_unlock(&uvn->u_obj.vmobjlock); 238 UVMHIST_LOG(maphist,"<- done, refcnt=%d", uvn->u_obj.uo_refs, 239 0, 0, 0); 240 return (&uvn->u_obj); 241 } 242 243 /* 244 * need to call VOP_GETATTR() to get the attributes, but that could 245 * block (due to I/O), so we want to unlock the object before calling. 246 * however, we want to keep anyone else from playing with the object 247 * while it is unlocked. to do this we set UVM_VNODE_ALOCK which 248 * prevents anyone from attaching to the vnode until we are done with 249 * it. 250 */ 251 uvn->u_flags = UVM_VNODE_ALOCK; 252 simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock in case we sleep */ 253 /* XXX: curproc? */ 254 255 if (vp->v_type == VBLK) { 256 /* 257 * We could implement this as a specfs getattr call, but: 258 * 259 * (1) VOP_GETATTR() would get the file system 260 * vnode operation, not the specfs operation. 261 * 262 * (2) All we want is the size, anyhow. 263 */ 264 result = (*bdevsw[major(vp->v_rdev)].d_ioctl)(vp->v_rdev, 265 DIOCGPART, (caddr_t)&pi, FREAD, curproc); 266 if (result == 0) { 267 /* XXX should remember blocksize */ 268 used_vnode_size = (u_quad_t)pi.disklab->d_secsize * 269 (u_quad_t)pi.part->p_size; 270 } 271 } else { 272 result = VOP_GETATTR(vp, &vattr, curproc->p_ucred, curproc); 273 if (result == 0) 274 used_vnode_size = vattr.va_size; 275 } 276 277 /* relock object */ 278 simple_lock(&uvn->u_obj.vmobjlock); 279 280 if (result != 0) { 281 if (uvn->u_flags & UVM_VNODE_WANTED) 282 wakeup(uvn); 283 uvn->u_flags = 0; 284 simple_unlock(&uvn->u_obj.vmobjlock); /* drop lock */ 285 UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0); 286 return(NULL); 287 } 288 289 /* 290 * make sure that the newsize fits within a vaddr_t 291 * XXX: need to revise addressing data types 292 */ 293 #ifdef DEBUG 294 if (vp->v_type == VBLK) 295 printf("used_vnode_size = %qu\n", used_vnode_size); 296 #endif 297 if (used_vnode_size > (vaddr_t) -PAGE_SIZE) { 298 #ifdef DEBUG 299 printf("uvn_attach: vn %p size truncated %qx->%x\n", vp, 300 used_vnode_size, -PAGE_SIZE); 301 #endif 302 used_vnode_size = (vaddr_t) -PAGE_SIZE; 303 } 304 305 /* 306 * now set up the uvn. 307 */ 308 uvn->u_obj.pgops = &uvm_vnodeops; 309 TAILQ_INIT(&uvn->u_obj.memq); 310 uvn->u_obj.uo_npages = 0; 311 uvn->u_obj.uo_refs = 1; /* just us... */ 312 oldflags = uvn->u_flags; 313 uvn->u_flags = UVM_VNODE_VALID|UVM_VNODE_CANPERSIST; 314 uvn->u_nio = 0; 315 uvn->u_size = used_vnode_size; 316 317 /* if write access, we need to add it to the wlist */ 318 if (accessprot & VM_PROT_WRITE) { 319 simple_lock(&uvn_wl_lock); 320 LIST_INSERT_HEAD(&uvn_wlist, uvn, u_wlist); 321 simple_unlock(&uvn_wl_lock); 322 uvn->u_flags |= UVM_VNODE_WRITEABLE; /* we are on wlist! */ 323 } 324 325 /* 326 * add a reference to the vnode. this reference will stay as long 327 * as there is a valid mapping of the vnode. dropped when the 328 * reference count goes to zero [and we either free or persist]. 329 */ 330 VREF(vp); 331 simple_unlock(&uvn->u_obj.vmobjlock); 332 if (oldflags & UVM_VNODE_WANTED) 333 wakeup(uvn); 334 335 UVMHIST_LOG(maphist,"<- done/VREF, ret 0x%x", &uvn->u_obj,0,0,0); 336 return(&uvn->u_obj); 337 } 338 339 340 /* 341 * uvn_reference 342 * 343 * duplicate a reference to a VM object. Note that the reference 344 * count must already be at least one (the passed in reference) so 345 * there is no chance of the uvn being killed or locked out here. 346 * 347 * => caller must call with object unlocked. 348 * => caller must be using the same accessprot as was used at attach time 349 */ 350 351 352 static void 353 uvn_reference(uobj) 354 struct uvm_object *uobj; 355 { 356 #ifdef DIAGNOSTIC 357 struct uvm_vnode *uvn = (struct uvm_vnode *) uobj; 358 #endif 359 UVMHIST_FUNC("uvn_reference"); UVMHIST_CALLED(maphist); 360 361 simple_lock(&uobj->vmobjlock); 362 #ifdef DIAGNOSTIC 363 if ((uvn->u_flags & UVM_VNODE_VALID) == 0) { 364 printf("uvn_reference: ref=%d, flags=0x%x\n", uvn->u_flags, 365 uobj->uo_refs); 366 panic("uvn_reference: invalid state"); 367 } 368 #endif 369 uobj->uo_refs++; 370 UVMHIST_LOG(maphist, "<- done (uobj=0x%x, ref = %d)", 371 uobj, uobj->uo_refs,0,0); 372 simple_unlock(&uobj->vmobjlock); 373 } 374 375 /* 376 * uvn_detach 377 * 378 * remove a reference to a VM object. 379 * 380 * => caller must call with object unlocked and map locked. 381 * => this starts the detach process, but doesn't have to finish it 382 * (async i/o could still be pending). 383 */ 384 static void 385 uvn_detach(uobj) 386 struct uvm_object *uobj; 387 { 388 struct uvm_vnode *uvn; 389 struct vnode *vp; 390 int oldflags; 391 UVMHIST_FUNC("uvn_detach"); UVMHIST_CALLED(maphist); 392 393 simple_lock(&uobj->vmobjlock); 394 395 UVMHIST_LOG(maphist," (uobj=0x%x) ref=%d", uobj,uobj->uo_refs,0,0); 396 uobj->uo_refs--; /* drop ref! */ 397 if (uobj->uo_refs) { /* still more refs */ 398 simple_unlock(&uobj->vmobjlock); 399 UVMHIST_LOG(maphist, "<- done (rc>0)", 0,0,0,0); 400 return; 401 } 402 403 /* 404 * get other pointers ... 405 */ 406 407 uvn = (struct uvm_vnode *) uobj; 408 vp = (struct vnode *) uobj; 409 410 /* 411 * clear VTEXT flag now that there are no mappings left (VTEXT is used 412 * to keep an active text file from being overwritten). 413 */ 414 vp->v_flag &= ~VTEXT; 415 416 /* 417 * we just dropped the last reference to the uvn. see if we can 418 * let it "stick around". 419 */ 420 421 if (uvn->u_flags & UVM_VNODE_CANPERSIST) { 422 /* won't block */ 423 uvn_flush(uobj, 0, 0, PGO_DEACTIVATE|PGO_ALLPAGES); 424 simple_unlock(&uobj->vmobjlock); 425 vrele(vp); /* drop vnode reference */ 426 UVMHIST_LOG(maphist,"<- done/vrele! (persist)", 0,0,0,0); 427 return; 428 } 429 430 /* 431 * its a goner! 432 */ 433 434 UVMHIST_LOG(maphist," its a goner (flushing)!", 0,0,0,0); 435 436 uvn->u_flags |= UVM_VNODE_DYING; 437 438 /* 439 * even though we may unlock in flush, no one can gain a reference 440 * to us until we clear the "dying" flag [because it blocks 441 * attaches]. we will not do that until after we've disposed of all 442 * the pages with uvn_flush(). note that before the flush the only 443 * pages that could be marked PG_BUSY are ones that are in async 444 * pageout by the daemon. (there can't be any pending "get"'s 445 * because there are no references to the object). 446 */ 447 448 (void) uvn_flush(uobj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES); 449 450 UVMHIST_LOG(maphist," its a goner (done flush)!", 0,0,0,0); 451 452 /* 453 * given the structure of this pager, the above flush request will 454 * create the following state: all the pages that were in the object 455 * have either been free'd or they are marked PG_BUSY|PG_RELEASED. 456 * the PG_BUSY bit was set either by us or the daemon for async I/O. 457 * in either case, if we have pages left we can't kill the object 458 * yet because i/o is pending. in this case we set the "relkill" 459 * flag which will cause pgo_releasepg to kill the object once all 460 * the I/O's are done [pgo_releasepg will be called from the aiodone 461 * routine or from the page daemon]. 462 */ 463 464 if (uobj->uo_npages) { /* I/O pending. iodone will free */ 465 #ifdef DIAGNOSTIC 466 /* 467 * XXXCDC: very unlikely to happen until we have async i/o 468 * so print a little info message in case it does. 469 */ 470 printf("uvn_detach: vn %p has pages left after flush - " 471 "relkill mode\n", uobj); 472 #endif 473 uvn->u_flags |= UVM_VNODE_RELKILL; 474 simple_unlock(&uobj->vmobjlock); 475 UVMHIST_LOG(maphist,"<- done! (releasepg will kill obj)", 0, 0, 476 0, 0); 477 return; 478 } 479 480 /* 481 * kill object now. note that we can't be on the sync q because 482 * all references are gone. 483 */ 484 if (uvn->u_flags & UVM_VNODE_WRITEABLE) { 485 simple_lock(&uvn_wl_lock); /* protect uvn_wlist */ 486 LIST_REMOVE(uvn, u_wlist); 487 simple_unlock(&uvn_wl_lock); 488 } 489 #ifdef DIAGNOSTIC 490 if (uobj->memq.tqh_first != NULL) 491 panic("uvn_deref: vnode VM object still has pages afer " 492 "syncio/free flush"); 493 #endif 494 oldflags = uvn->u_flags; 495 uvn->u_flags = 0; 496 simple_unlock(&uobj->vmobjlock); 497 498 /* wake up any sleepers */ 499 if (oldflags & UVM_VNODE_WANTED) 500 wakeup(uvn); 501 502 /* 503 * drop our reference to the vnode. 504 */ 505 vrele(vp); 506 UVMHIST_LOG(maphist,"<- done (vrele) final", 0,0,0,0); 507 508 return; 509 } 510 511 /* 512 * uvm_vnp_terminate: external hook to clear out a vnode's VM 513 * 514 * called in two cases: 515 * [1] when a persisting vnode vm object (i.e. one with a zero reference 516 * count) needs to be freed so that a vnode can be reused. this 517 * happens under "getnewvnode" in vfs_subr.c. if the vnode from 518 * the free list is still attached (i.e. not VBAD) then vgone is 519 * called. as part of the vgone trace this should get called to 520 * free the vm object. this is the common case. 521 * [2] when a filesystem is being unmounted by force (MNT_FORCE, 522 * "umount -f") the vgone() function is called on active vnodes 523 * on the mounted file systems to kill their data (the vnodes become 524 * "dead" ones [see src/sys/miscfs/deadfs/...]). that results in a 525 * call here (even if the uvn is still in use -- i.e. has a non-zero 526 * reference count). this case happens at "umount -f" and during a 527 * "reboot/halt" operation. 528 * 529 * => the caller must XLOCK and VOP_LOCK the vnode before calling us 530 * [protects us from getting a vnode that is already in the DYING 531 * state...] 532 * => unlike uvn_detach, this function must not return until all the 533 * uvn's pages are disposed of. 534 * => in case [2] the uvn is still alive after this call, but all I/O 535 * ops will fail (due to the backing vnode now being "dead"). this 536 * will prob. kill any process using the uvn due to pgo_get failing. 537 */ 538 539 void 540 uvm_vnp_terminate(vp) 541 struct vnode *vp; 542 { 543 struct uvm_vnode *uvn = &vp->v_uvm; 544 int oldflags; 545 UVMHIST_FUNC("uvm_vnp_terminate"); UVMHIST_CALLED(maphist); 546 547 /* 548 * lock object and check if it is valid 549 */ 550 simple_lock(&uvn->u_obj.vmobjlock); 551 UVMHIST_LOG(maphist, " vp=0x%x, ref=%d, flag=0x%x", vp, 552 uvn->u_obj.uo_refs, uvn->u_flags, 0); 553 if ((uvn->u_flags & UVM_VNODE_VALID) == 0) { 554 simple_unlock(&uvn->u_obj.vmobjlock); 555 UVMHIST_LOG(maphist, "<- done (not active)", 0, 0, 0, 0); 556 return; 557 } 558 559 /* 560 * must be a valid uvn that is not already dying (because XLOCK 561 * protects us from that). the uvn can't in the the ALOCK state 562 * because it is valid, and uvn's that are in the ALOCK state haven't 563 * been marked valid yet. 564 */ 565 566 #ifdef DEBUG 567 /* 568 * debug check: are we yanking the vnode out from under our uvn? 569 */ 570 if (uvn->u_obj.uo_refs) { 571 printf("uvm_vnp_terminate(%p): terminating active vnode " 572 "(refs=%d)\n", uvn, uvn->u_obj.uo_refs); 573 } 574 #endif 575 576 /* 577 * it is possible that the uvn was detached and is in the relkill 578 * state [i.e. waiting for async i/o to finish so that releasepg can 579 * kill object]. we take over the vnode now and cancel the relkill. 580 * we want to know when the i/o is done so we can recycle right 581 * away. note that a uvn can only be in the RELKILL state if it 582 * has a zero reference count. 583 */ 584 585 if (uvn->u_flags & UVM_VNODE_RELKILL) 586 uvn->u_flags &= ~UVM_VNODE_RELKILL; /* cancel RELKILL */ 587 588 /* 589 * block the uvn by setting the dying flag, and then flush the 590 * pages. (note that flush may unlock object while doing I/O, but 591 * it will re-lock it before it returns control here). 592 * 593 * also, note that we tell I/O that we are already VOP_LOCK'd so 594 * that uvn_io doesn't attempt to VOP_LOCK again. 595 * 596 * XXXCDC: setting VNISLOCKED on an active uvn which is being terminated 597 * due to a forceful unmount might not be a good idea. maybe we 598 * need a way to pass in this info to uvn_flush through a 599 * pager-defined PGO_ constant [currently there are none]. 600 */ 601 uvn->u_flags |= UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED; 602 603 (void) uvn_flush(&uvn->u_obj, 0, 0, PGO_CLEANIT|PGO_FREE|PGO_ALLPAGES); 604 605 /* 606 * as we just did a flush we expect all the pages to be gone or in 607 * the process of going. sleep to wait for the rest to go [via iosync]. 608 */ 609 610 while (uvn->u_obj.uo_npages) { 611 #ifdef DIAGNOSTIC 612 struct vm_page *pp; 613 for (pp = uvn->u_obj.memq.tqh_first ; pp != NULL ; 614 pp = pp->listq.tqe_next) { 615 if ((pp->flags & PG_BUSY) == 0) 616 panic("uvm_vnp_terminate: detected unbusy pg"); 617 } 618 if (uvn->u_nio == 0) 619 panic("uvm_vnp_terminate: no I/O to wait for?"); 620 printf("uvm_vnp_terminate: waiting for I/O to fin.\n"); 621 /* 622 * XXXCDC: this is unlikely to happen without async i/o so we 623 * put a printf in just to keep an eye on it. 624 */ 625 #endif 626 uvn->u_flags |= UVM_VNODE_IOSYNC; 627 UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock, FALSE, 628 "uvn_term",0); 629 simple_lock(&uvn->u_obj.vmobjlock); 630 } 631 632 /* 633 * done. now we free the uvn if its reference count is zero 634 * (true if we are zapping a persisting uvn). however, if we are 635 * terminating a uvn with active mappings we let it live ... future 636 * calls down to the vnode layer will fail. 637 */ 638 639 oldflags = uvn->u_flags; 640 if (uvn->u_obj.uo_refs) { 641 642 /* 643 * uvn must live on it is dead-vnode state until all references 644 * are gone. restore flags. clear CANPERSIST state. 645 */ 646 647 uvn->u_flags &= ~(UVM_VNODE_DYING|UVM_VNODE_VNISLOCKED| 648 UVM_VNODE_WANTED|UVM_VNODE_CANPERSIST); 649 650 } else { 651 652 /* 653 * free the uvn now. note that the VREF reference is already 654 * gone [it is dropped when we enter the persist state]. 655 */ 656 if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED) 657 panic("uvm_vnp_terminate: io sync wanted bit set"); 658 659 if (uvn->u_flags & UVM_VNODE_WRITEABLE) { 660 simple_lock(&uvn_wl_lock); 661 LIST_REMOVE(uvn, u_wlist); 662 simple_unlock(&uvn_wl_lock); 663 } 664 uvn->u_flags = 0; /* uvn is history, clear all bits */ 665 } 666 667 if (oldflags & UVM_VNODE_WANTED) 668 wakeup(uvn); /* object lock still held */ 669 670 simple_unlock(&uvn->u_obj.vmobjlock); 671 UVMHIST_LOG(maphist, "<- done", 0, 0, 0, 0); 672 673 } 674 675 /* 676 * uvn_releasepg: handled a released page in a uvn 677 * 678 * => "pg" is a PG_BUSY [caller owns it], PG_RELEASED page that we need 679 * to dispose of. 680 * => caller must handled PG_WANTED case 681 * => called with page's object locked, pageq's unlocked 682 * => returns TRUE if page's object is still alive, FALSE if we 683 * killed the page's object. if we return TRUE, then we 684 * return with the object locked. 685 * => if (nextpgp != NULL) => we return pageq.tqe_next here, and return 686 * with the page queues locked [for pagedaemon] 687 * => if (nextpgp == NULL) => we return with page queues unlocked [normal case] 688 * => we kill the uvn if it is not referenced and we are suppose to 689 * kill it ("relkill"). 690 */ 691 692 boolean_t 693 uvn_releasepg(pg, nextpgp) 694 struct vm_page *pg; 695 struct vm_page **nextpgp; /* OUT */ 696 { 697 struct uvm_vnode *uvn = (struct uvm_vnode *) pg->uobject; 698 #ifdef DIAGNOSTIC 699 if ((pg->flags & PG_RELEASED) == 0) 700 panic("uvn_releasepg: page not released!"); 701 #endif 702 703 /* 704 * dispose of the page [caller handles PG_WANTED] 705 */ 706 pmap_page_protect(PMAP_PGARG(pg), VM_PROT_NONE); 707 uvm_lock_pageq(); 708 if (nextpgp) 709 *nextpgp = pg->pageq.tqe_next; /* next page for daemon */ 710 uvm_pagefree(pg); 711 if (!nextpgp) 712 uvm_unlock_pageq(); 713 714 /* 715 * now see if we need to kill the object 716 */ 717 if (uvn->u_flags & UVM_VNODE_RELKILL) { 718 if (uvn->u_obj.uo_refs) 719 panic("uvn_releasepg: kill flag set on referenced " 720 "object!"); 721 if (uvn->u_obj.uo_npages == 0) { 722 if (uvn->u_flags & UVM_VNODE_WRITEABLE) { 723 simple_lock(&uvn_wl_lock); 724 LIST_REMOVE(uvn, u_wlist); 725 simple_unlock(&uvn_wl_lock); 726 } 727 #ifdef DIAGNOSTIC 728 if (uvn->u_obj.memq.tqh_first) 729 panic("uvn_releasepg: pages in object with npages == 0"); 730 #endif 731 if (uvn->u_flags & UVM_VNODE_WANTED) 732 /* still holding object lock */ 733 wakeup(uvn); 734 735 uvn->u_flags = 0; /* DEAD! */ 736 simple_unlock(&uvn->u_obj.vmobjlock); 737 return (FALSE); 738 } 739 } 740 return (TRUE); 741 } 742 743 /* 744 * NOTE: currently we have to use VOP_READ/VOP_WRITE because they go 745 * through the buffer cache and allow I/O in any size. These VOPs use 746 * synchronous i/o. [vs. VOP_STRATEGY which can be async, but doesn't 747 * go through the buffer cache or allow I/O sizes larger than a 748 * block]. we will eventually want to change this. 749 * 750 * issues to consider: 751 * uvm provides the uvm_aiodesc structure for async i/o management. 752 * there are two tailq's in the uvm. structure... one for pending async 753 * i/o and one for "done" async i/o. to do an async i/o one puts 754 * an aiodesc on the "pending" list (protected by splbio()), starts the 755 * i/o and returns VM_PAGER_PEND. when the i/o is done, we expect 756 * some sort of "i/o done" function to be called (at splbio(), interrupt 757 * time). this function should remove the aiodesc from the pending list 758 * and place it on the "done" list and wakeup the daemon. the daemon 759 * will run at normal spl() and will remove all items from the "done" 760 * list and call the "aiodone" hook for each done request (see uvm_pager.c). 761 * [in the old vm code, this was done by calling the "put" routine with 762 * null arguments which made the code harder to read and understand because 763 * you had one function ("put") doing two things.] 764 * 765 * so the current pager needs: 766 * int uvn_aiodone(struct uvm_aiodesc *) 767 * 768 * => return KERN_SUCCESS (aio finished, free it). otherwise requeue for 769 * later collection. 770 * => called with pageq's locked by the daemon. 771 * 772 * general outline: 773 * - "try" to lock object. if fail, just return (will try again later) 774 * - drop "u_nio" (this req is done!) 775 * - if (object->iosync && u_naio == 0) { wakeup &uvn->u_naio } 776 * - get "page" structures (atop?). 777 * - handle "wanted" pages 778 * - handle "released" pages [using pgo_releasepg] 779 * >>> pgo_releasepg may kill the object 780 * dont forget to look at "object" wanted flag in all cases. 781 */ 782 783 784 /* 785 * uvn_flush: flush pages out of a uvm object. 786 * 787 * => object should be locked by caller. we may _unlock_ the object 788 * if (and only if) we need to clean a page (PGO_CLEANIT). 789 * we return with the object locked. 790 * => if PGO_CLEANIT is set, we may block (due to I/O). thus, a caller 791 * might want to unlock higher level resources (e.g. vm_map) 792 * before calling flush. 793 * => if PGO_CLEANIT is not set, then we will neither unlock the object 794 * or block. 795 * => if PGO_ALLPAGE is set, then all pages in the object are valid targets 796 * for flushing. 797 * => NOTE: we rely on the fact that the object's memq is a TAILQ and 798 * that new pages are inserted on the tail end of the list. thus, 799 * we can make a complete pass through the object in one go by starting 800 * at the head and working towards the tail (new pages are put in 801 * front of us). 802 * => NOTE: we are allowed to lock the page queues, so the caller 803 * must not be holding the lock on them [e.g. pagedaemon had 804 * better not call us with the queues locked] 805 * => we return TRUE unless we encountered some sort of I/O error 806 * 807 * comment on "cleaning" object and PG_BUSY pages: 808 * this routine is holding the lock on the object. the only time 809 * that it can run into a PG_BUSY page that it does not own is if 810 * some other process has started I/O on the page (e.g. either 811 * a pagein, or a pageout). if the PG_BUSY page is being paged 812 * in, then it can not be dirty (!PG_CLEAN) because no one has 813 * had a chance to modify it yet. if the PG_BUSY page is being 814 * paged out then it means that someone else has already started 815 * cleaning the page for us (how nice!). in this case, if we 816 * have syncio specified, then after we make our pass through the 817 * object we need to wait for the other PG_BUSY pages to clear 818 * off (i.e. we need to do an iosync). also note that once a 819 * page is PG_BUSY it must stay in its object until it is un-busyed. 820 * 821 * note on page traversal: 822 * we can traverse the pages in an object either by going down the 823 * linked list in "uobj->memq", or we can go over the address range 824 * by page doing hash table lookups for each address. depending 825 * on how many pages are in the object it may be cheaper to do one 826 * or the other. we set "by_list" to true if we are using memq. 827 * if the cost of a hash lookup was equal to the cost of the list 828 * traversal we could compare the number of pages in the start->stop 829 * range to the total number of pages in the object. however, it 830 * seems that a hash table lookup is more expensive than the linked 831 * list traversal, so we multiply the number of pages in the 832 * start->stop range by a penalty which we define below. 833 */ 834 835 #define UVN_HASH_PENALTY 4 /* XXX: a guess */ 836 837 static boolean_t 838 uvn_flush(uobj, start, stop, flags) 839 struct uvm_object *uobj; 840 vaddr_t start, stop; 841 int flags; 842 { 843 struct uvm_vnode *uvn = (struct uvm_vnode *) uobj; 844 struct vm_page *pp, *ppnext, *ptmp; 845 struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp; 846 int npages, result, lcv; 847 boolean_t retval, need_iosync, by_list, needs_clean; 848 vaddr_t curoff; 849 u_short pp_version; 850 UVMHIST_FUNC("uvn_flush"); UVMHIST_CALLED(maphist); 851 852 curoff = 0; /* XXX: shut up gcc */ 853 /* 854 * get init vals and determine how we are going to traverse object 855 */ 856 857 need_iosync = FALSE; 858 retval = TRUE; /* return value */ 859 if (flags & PGO_ALLPAGES) { 860 start = 0; 861 stop = round_page(uvn->u_size); 862 by_list = TRUE; /* always go by the list */ 863 } else { 864 start = trunc_page(start); 865 stop = round_page(stop); 866 if (stop > round_page(uvn->u_size)) 867 printf("uvn_flush: strange, got an out of range " 868 "flush (fixed)\n"); 869 870 by_list = (uobj->uo_npages <= 871 ((stop - start) >> PAGE_SHIFT) * UVN_HASH_PENALTY); 872 } 873 874 UVMHIST_LOG(maphist, 875 " flush start=0x%x, stop=0x%x, by_list=%d, flags=0x%x", 876 start, stop, by_list, flags); 877 878 /* 879 * PG_CLEANCHK: this bit is used by the pgo_mk_pcluster function as 880 * a _hint_ as to how up to date the PG_CLEAN bit is. if the hint 881 * is wrong it will only prevent us from clustering... it won't break 882 * anything. we clear all PG_CLEANCHK bits here, and pgo_mk_pcluster 883 * will set them as it syncs PG_CLEAN. This is only an issue if we 884 * are looking at non-inactive pages (because inactive page's PG_CLEAN 885 * bit is always up to date since there are no mappings). 886 * [borrowed PG_CLEANCHK idea from FreeBSD VM] 887 */ 888 889 if ((flags & PGO_CLEANIT) != 0 && 890 uobj->pgops->pgo_mk_pcluster != NULL) { 891 if (by_list) { 892 for (pp = uobj->memq.tqh_first ; pp != NULL ; 893 pp = pp->listq.tqe_next) { 894 if (pp->offset < start || pp->offset >= stop) 895 continue; 896 pp->flags &= ~PG_CLEANCHK; 897 } 898 899 } else { /* by hash */ 900 for (curoff = start ; curoff < stop; 901 curoff += PAGE_SIZE) { 902 pp = uvm_pagelookup(uobj, curoff); 903 if (pp) 904 pp->flags &= ~PG_CLEANCHK; 905 } 906 } 907 } 908 909 /* 910 * now do it. note: we must update ppnext in body of loop or we 911 * will get stuck. we need to use ppnext because we may free "pp" 912 * before doing the next loop. 913 */ 914 915 if (by_list) { 916 pp = uobj->memq.tqh_first; 917 } else { 918 curoff = start; 919 pp = uvm_pagelookup(uobj, curoff); 920 } 921 922 ppnext = NULL; /* XXX: shut up gcc */ 923 ppsp = NULL; /* XXX: shut up gcc */ 924 uvm_lock_pageq(); /* page queues locked */ 925 926 /* locked: both page queues and uobj */ 927 for ( ; (by_list && pp != NULL) || 928 (!by_list && curoff < stop) ; pp = ppnext) { 929 930 if (by_list) { 931 932 /* 933 * range check 934 */ 935 936 if (pp->offset < start || pp->offset >= stop) { 937 ppnext = pp->listq.tqe_next; 938 continue; 939 } 940 941 } else { 942 943 /* 944 * null check 945 */ 946 947 curoff += PAGE_SIZE; 948 if (pp == NULL) { 949 if (curoff < stop) 950 ppnext = uvm_pagelookup(uobj, curoff); 951 continue; 952 } 953 954 } 955 956 /* 957 * handle case where we do not need to clean page (either 958 * because we are not clean or because page is not dirty or 959 * is busy): 960 * 961 * NOTE: we are allowed to deactivate a non-wired active 962 * PG_BUSY page, but once a PG_BUSY page is on the inactive 963 * queue it must stay put until it is !PG_BUSY (so as not to 964 * confuse pagedaemon). 965 */ 966 967 if ((flags & PGO_CLEANIT) == 0 || (pp->flags & PG_BUSY) != 0) { 968 needs_clean = FALSE; 969 if ((pp->flags & PG_BUSY) != 0 && 970 (flags & (PGO_CLEANIT|PGO_SYNCIO)) == 971 (PGO_CLEANIT|PGO_SYNCIO)) 972 need_iosync = TRUE; 973 } else { 974 /* 975 * freeing: nuke all mappings so we can sync 976 * PG_CLEAN bit with no race 977 */ 978 if ((pp->flags & PG_CLEAN) != 0 && 979 (flags & PGO_FREE) != 0 && 980 (pp->pqflags & PQ_ACTIVE) != 0) 981 pmap_page_protect(PMAP_PGARG(pp), VM_PROT_NONE); 982 if ((pp->flags & PG_CLEAN) != 0 && 983 pmap_is_modified(PMAP_PGARG(pp))) 984 pp->flags &= ~(PG_CLEAN); 985 pp->flags |= PG_CLEANCHK; /* update "hint" */ 986 987 needs_clean = ((pp->flags & PG_CLEAN) == 0); 988 } 989 990 /* 991 * if we don't need a clean... load ppnext and dispose of pp 992 */ 993 if (!needs_clean) { 994 /* load ppnext */ 995 if (by_list) 996 ppnext = pp->listq.tqe_next; 997 else { 998 if (curoff < stop) 999 ppnext = uvm_pagelookup(uobj, curoff); 1000 } 1001 1002 /* now dispose of pp */ 1003 if (flags & PGO_DEACTIVATE) { 1004 if ((pp->pqflags & PQ_INACTIVE) == 0 && 1005 pp->wire_count == 0) { 1006 pmap_page_protect(PMAP_PGARG(pp), 1007 VM_PROT_NONE); 1008 uvm_pagedeactivate(pp); 1009 } 1010 1011 } else if (flags & PGO_FREE) { 1012 if (pp->flags & PG_BUSY) { 1013 /* release busy pages */ 1014 pp->flags |= PG_RELEASED; 1015 } else { 1016 pmap_page_protect(PMAP_PGARG(pp), 1017 VM_PROT_NONE); 1018 /* removed page from object */ 1019 uvm_pagefree(pp); 1020 } 1021 } 1022 /* ppnext is valid so we can continue... */ 1023 continue; 1024 } 1025 1026 /* 1027 * pp points to a page in the locked object that we are 1028 * working on. if it is !PG_CLEAN,!PG_BUSY and we asked 1029 * for cleaning (PGO_CLEANIT). we clean it now. 1030 * 1031 * let uvm_pager_put attempted a clustered page out. 1032 * note: locked: uobj and page queues. 1033 */ 1034 1035 pp->flags |= PG_BUSY; /* we 'own' page now */ 1036 UVM_PAGE_OWN(pp, "uvn_flush"); 1037 pmap_page_protect(PMAP_PGARG(pp), VM_PROT_READ); 1038 pp_version = pp->version; 1039 ReTry: 1040 ppsp = pps; 1041 npages = sizeof(pps) / sizeof(struct vm_page *); 1042 1043 /* locked: page queues, uobj */ 1044 result = uvm_pager_put(uobj, pp, &ppsp, &npages, 1045 flags | PGO_DOACTCLUST, start, stop); 1046 /* unlocked: page queues, uobj */ 1047 1048 /* 1049 * at this point nothing is locked. if we did an async I/O 1050 * it is remotely possible for the async i/o to complete and 1051 * the page "pp" be freed or what not before we get a chance 1052 * to relock the object. in order to detect this, we have 1053 * saved the version number of the page in "pp_version". 1054 */ 1055 1056 /* relock! */ 1057 simple_lock(&uobj->vmobjlock); 1058 uvm_lock_pageq(); 1059 1060 /* 1061 * VM_PAGER_AGAIN: given the structure of this pager, this 1062 * can only happen when we are doing async I/O and can't 1063 * map the pages into kernel memory (pager_map) due to lack 1064 * of vm space. if this happens we drop back to sync I/O. 1065 */ 1066 1067 if (result == VM_PAGER_AGAIN) { 1068 /* 1069 * it is unlikely, but page could have been released 1070 * while we had the object lock dropped. we ignore 1071 * this now and retry the I/O. we will detect and 1072 * handle the released page after the syncio I/O 1073 * completes. 1074 */ 1075 #ifdef DIAGNOSTIC 1076 if (flags & PGO_SYNCIO) 1077 panic("uvn_flush: PGO_SYNCIO return 'try again' error (impossible)"); 1078 #endif 1079 flags |= PGO_SYNCIO; 1080 goto ReTry; 1081 } 1082 1083 /* 1084 * the cleaning operation is now done. finish up. note that 1085 * on error (!OK, !PEND) uvm_pager_put drops the cluster for us. 1086 * if success (OK, PEND) then uvm_pager_put returns the cluster 1087 * to us in ppsp/npages. 1088 */ 1089 1090 /* 1091 * for pending async i/o if we are not deactivating/freeing 1092 * we can move on to the next page. 1093 */ 1094 1095 if (result == VM_PAGER_PEND) { 1096 1097 if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) { 1098 /* 1099 * no per-page ops: refresh ppnext and continue 1100 */ 1101 if (by_list) { 1102 if (pp->version == pp_version) 1103 ppnext = pp->listq.tqe_next; 1104 else 1105 /* reset */ 1106 ppnext = uobj->memq.tqh_first; 1107 } else { 1108 if (curoff < stop) 1109 ppnext = uvm_pagelookup(uobj, 1110 curoff); 1111 } 1112 continue; 1113 } 1114 1115 /* need to do anything here? */ 1116 } 1117 1118 /* 1119 * need to look at each page of the I/O operation. we defer 1120 * processing "pp" until the last trip through this "for" loop 1121 * so that we can load "ppnext" for the main loop after we 1122 * play with the cluster pages [thus the "npages + 1" in the 1123 * loop below]. 1124 */ 1125 1126 for (lcv = 0 ; lcv < npages + 1 ; lcv++) { 1127 1128 /* 1129 * handle ppnext for outside loop, and saving pp 1130 * until the end. 1131 */ 1132 if (lcv < npages) { 1133 if (ppsp[lcv] == pp) 1134 continue; /* skip pp until the end */ 1135 ptmp = ppsp[lcv]; 1136 } else { 1137 ptmp = pp; 1138 1139 /* set up next page for outer loop */ 1140 if (by_list) { 1141 if (pp->version == pp_version) 1142 ppnext = pp->listq.tqe_next; 1143 else 1144 /* reset */ 1145 ppnext = uobj->memq.tqh_first; 1146 } else { 1147 if (curoff < stop) 1148 ppnext = uvm_pagelookup(uobj, curoff); 1149 } 1150 } 1151 1152 /* 1153 * verify the page didn't get moved while obj was 1154 * unlocked 1155 */ 1156 if (result == VM_PAGER_PEND && ptmp->uobject != uobj) 1157 continue; 1158 1159 /* 1160 * unbusy the page if I/O is done. note that for 1161 * pending I/O it is possible that the I/O op 1162 * finished before we relocked the object (in 1163 * which case the page is no longer busy). 1164 */ 1165 1166 if (result != VM_PAGER_PEND) { 1167 if (ptmp->flags & PG_WANTED) 1168 /* still holding object lock */ 1169 thread_wakeup(ptmp); 1170 1171 ptmp->flags &= ~(PG_WANTED|PG_BUSY); 1172 UVM_PAGE_OWN(ptmp, NULL); 1173 if (ptmp->flags & PG_RELEASED) { 1174 1175 /* pgo_releasepg wants this */ 1176 uvm_unlock_pageq(); 1177 if (!uvn_releasepg(ptmp, NULL)) 1178 return (TRUE); 1179 1180 uvm_lock_pageq(); /* relock */ 1181 continue; /* next page */ 1182 1183 } else { 1184 ptmp->flags |= (PG_CLEAN|PG_CLEANCHK); 1185 if ((flags & PGO_FREE) == 0) 1186 pmap_clear_modify( 1187 PMAP_PGARG(ptmp)); 1188 } 1189 } 1190 1191 /* 1192 * dispose of page 1193 */ 1194 1195 if (flags & PGO_DEACTIVATE) { 1196 if ((pp->pqflags & PQ_INACTIVE) == 0 && 1197 pp->wire_count == 0) { 1198 pmap_page_protect(PMAP_PGARG(ptmp), 1199 VM_PROT_NONE); 1200 uvm_pagedeactivate(ptmp); 1201 } 1202 1203 } else if (flags & PGO_FREE) { 1204 if (result == VM_PAGER_PEND) { 1205 if ((ptmp->flags & PG_BUSY) != 0) 1206 /* signal for i/o done */ 1207 ptmp->flags |= PG_RELEASED; 1208 } else { 1209 if (result != VM_PAGER_OK) { 1210 printf("uvn_flush: obj=%p, " 1211 "offset=0x%lx. error " 1212 "during pageout.\n", 1213 pp->uobject, pp->offset); 1214 printf("uvn_flush: WARNING: " 1215 "changes to page may be " 1216 "lost!\n"); 1217 retval = FALSE; 1218 } 1219 pmap_page_protect(PMAP_PGARG(ptmp), 1220 VM_PROT_NONE); 1221 uvm_pagefree(ptmp); 1222 } 1223 } 1224 1225 } /* end of "lcv" for loop */ 1226 1227 } /* end of "pp" for loop */ 1228 1229 /* 1230 * done with pagequeues: unlock 1231 */ 1232 uvm_unlock_pageq(); 1233 1234 /* 1235 * now wait for all I/O if required. 1236 */ 1237 if (need_iosync) { 1238 1239 UVMHIST_LOG(maphist," <<DOING IOSYNC>>",0,0,0,0); 1240 while (uvn->u_nio != 0) { 1241 uvn->u_flags |= UVM_VNODE_IOSYNC; 1242 UVM_UNLOCK_AND_WAIT(&uvn->u_nio, &uvn->u_obj.vmobjlock, 1243 FALSE, "uvn_flush",0); 1244 simple_lock(&uvn->u_obj.vmobjlock); 1245 } 1246 if (uvn->u_flags & UVM_VNODE_IOSYNCWANTED) 1247 wakeup(&uvn->u_flags); 1248 uvn->u_flags &= ~(UVM_VNODE_IOSYNC|UVM_VNODE_IOSYNCWANTED); 1249 } 1250 1251 /* return, with object locked! */ 1252 UVMHIST_LOG(maphist,"<- done (retval=0x%x)",retval,0,0,0); 1253 return(retval); 1254 } 1255 1256 /* 1257 * uvn_cluster 1258 * 1259 * we are about to do I/O in an object at offset. this function is called 1260 * to establish a range of offsets around "offset" in which we can cluster 1261 * I/O. 1262 * 1263 * - currently doesn't matter if obj locked or not. 1264 */ 1265 1266 static void 1267 uvn_cluster(uobj, offset, loffset, hoffset) 1268 struct uvm_object *uobj; 1269 vaddr_t offset; 1270 vaddr_t *loffset, *hoffset; /* OUT */ 1271 { 1272 struct uvm_vnode *uvn = (struct uvm_vnode *) uobj; 1273 *loffset = offset; 1274 1275 if (*loffset >= uvn->u_size) 1276 panic("uvn_cluster: offset out of range"); 1277 1278 /* 1279 * XXX: old pager claims we could use VOP_BMAP to get maxcontig value. 1280 */ 1281 *hoffset = *loffset + MAXBSIZE; 1282 if (*hoffset > round_page(uvn->u_size)) /* past end? */ 1283 *hoffset = round_page(uvn->u_size); 1284 1285 return; 1286 } 1287 1288 /* 1289 * uvn_put: flush page data to backing store. 1290 * 1291 * => prefer map unlocked (not required) 1292 * => object must be locked! we will _unlock_ it before starting I/O. 1293 * => flags: PGO_SYNCIO -- use sync. I/O 1294 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed) 1295 * => XXX: currently we use VOP_READ/VOP_WRITE which are only sync. 1296 * [thus we never do async i/o! see iodone comment] 1297 */ 1298 1299 static int 1300 uvn_put(uobj, pps, npages, flags) 1301 struct uvm_object *uobj; 1302 struct vm_page **pps; 1303 int npages, flags; 1304 { 1305 int retval; 1306 1307 /* note: object locked */ 1308 retval = uvn_io((struct uvm_vnode*)uobj, pps, npages, flags, UIO_WRITE); 1309 /* note: object unlocked */ 1310 1311 return(retval); 1312 } 1313 1314 1315 /* 1316 * uvn_get: get pages (synchronously) from backing store 1317 * 1318 * => prefer map unlocked (not required) 1319 * => object must be locked! we will _unlock_ it before starting any I/O. 1320 * => flags: PGO_ALLPAGES: get all of the pages 1321 * PGO_LOCKED: fault data structures are locked 1322 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx] 1323 * => NOTE: caller must check for released pages!! 1324 */ 1325 1326 static int 1327 uvn_get(uobj, offset, pps, npagesp, centeridx, access_type, advice, flags) 1328 struct uvm_object *uobj; 1329 vaddr_t offset; 1330 struct vm_page **pps; /* IN/OUT */ 1331 int *npagesp; /* IN (OUT if PGO_LOCKED) */ 1332 int centeridx, advice, flags; 1333 vm_prot_t access_type; 1334 { 1335 vaddr_t current_offset; 1336 struct vm_page *ptmp; 1337 int lcv, result, gotpages; 1338 boolean_t done; 1339 UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(maphist); 1340 UVMHIST_LOG(maphist, "flags=%d", flags,0,0,0); 1341 1342 /* 1343 * step 1: handled the case where fault data structures are locked. 1344 */ 1345 1346 if (flags & PGO_LOCKED) { 1347 1348 /* 1349 * gotpages is the current number of pages we've gotten (which 1350 * we pass back up to caller via *npagesp. 1351 */ 1352 1353 gotpages = 0; 1354 1355 /* 1356 * step 1a: get pages that are already resident. only do this 1357 * if the data structures are locked (i.e. the first time 1358 * through). 1359 */ 1360 1361 done = TRUE; /* be optimistic */ 1362 1363 for (lcv = 0, current_offset = offset ; lcv < *npagesp ; 1364 lcv++, current_offset += PAGE_SIZE) { 1365 1366 /* do we care about this page? if not, skip it */ 1367 if (pps[lcv] == PGO_DONTCARE) 1368 continue; 1369 1370 /* lookup page */ 1371 ptmp = uvm_pagelookup(uobj, current_offset); 1372 1373 /* to be useful must get a non-busy, non-released pg */ 1374 if (ptmp == NULL || 1375 (ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) { 1376 if (lcv == centeridx || (flags & PGO_ALLPAGES) 1377 != 0) 1378 done = FALSE; /* need to do a wait or I/O! */ 1379 continue; 1380 } 1381 1382 /* 1383 * useful page: busy/lock it and plug it in our 1384 * result array 1385 */ 1386 ptmp->flags |= PG_BUSY; /* loan up to caller */ 1387 UVM_PAGE_OWN(ptmp, "uvn_get1"); 1388 pps[lcv] = ptmp; 1389 gotpages++; 1390 1391 } /* "for" lcv loop */ 1392 1393 /* 1394 * XXX: given the "advice", should we consider async read-ahead? 1395 * XXX: fault current does deactive of pages behind us. is 1396 * this good (other callers might now). 1397 */ 1398 /* 1399 * XXX: read-ahead currently handled by buffer cache (bread) 1400 * level. 1401 * XXX: no async i/o available. 1402 * XXX: so we don't do anything now. 1403 */ 1404 1405 /* 1406 * step 1c: now we've either done everything needed or we to 1407 * unlock and do some waiting or I/O. 1408 */ 1409 1410 *npagesp = gotpages; /* let caller know */ 1411 if (done) 1412 return(VM_PAGER_OK); /* bingo! */ 1413 else 1414 /* EEK! Need to unlock and I/O */ 1415 return(VM_PAGER_UNLOCK); 1416 } 1417 1418 /* 1419 * step 2: get non-resident or busy pages. 1420 * object is locked. data structures are unlocked. 1421 * 1422 * XXX: because we can't do async I/O at this level we get things 1423 * page at a time (otherwise we'd chunk). the VOP_READ() will do 1424 * async-read-ahead for us at a lower level. 1425 */ 1426 1427 for (lcv = 0, current_offset = offset ; 1428 lcv < *npagesp ; lcv++, current_offset += PAGE_SIZE) { 1429 1430 /* skip over pages we've already gotten or don't want */ 1431 /* skip over pages we don't _have_ to get */ 1432 if (pps[lcv] != NULL || (lcv != centeridx && 1433 (flags & PGO_ALLPAGES) == 0)) 1434 continue; 1435 1436 /* 1437 * we have yet to locate the current page (pps[lcv]). we first 1438 * look for a page that is already at the current offset. if 1439 * we fine a page, we check to see if it is busy or released. 1440 * if that is the case, then we sleep on the page until it is 1441 * no longer busy or released and repeat the lookup. if the 1442 * page we found is neither busy nor released, then we busy it 1443 * (so we own it) and plug it into pps[lcv]. this breaks the 1444 * following while loop and indicates we are ready to move on 1445 * to the next page in the "lcv" loop above. 1446 * 1447 * if we exit the while loop with pps[lcv] still set to NULL, 1448 * then it means that we allocated a new busy/fake/clean page 1449 * ptmp in the object and we need to do I/O to fill in the data. 1450 */ 1451 1452 while (pps[lcv] == NULL) { /* top of "pps" while loop */ 1453 1454 /* look for a current page */ 1455 ptmp = uvm_pagelookup(uobj, current_offset); 1456 1457 /* nope? allocate one now (if we can) */ 1458 if (ptmp == NULL) { 1459 1460 ptmp = uvm_pagealloc(uobj, current_offset, 1461 NULL); /* alloc */ 1462 1463 /* out of RAM? */ 1464 if (ptmp == NULL) { 1465 simple_unlock(&uobj->vmobjlock); 1466 uvm_wait("uvn_getpage"); 1467 simple_lock(&uobj->vmobjlock); 1468 1469 /* goto top of pps while loop */ 1470 continue; 1471 } 1472 1473 /* 1474 * got new page ready for I/O. break pps 1475 * while loop. pps[lcv] is still NULL. 1476 */ 1477 break; 1478 } 1479 1480 /* page is there, see if we need to wait on it */ 1481 if ((ptmp->flags & (PG_BUSY|PG_RELEASED)) != 0) { 1482 ptmp->flags |= PG_WANTED; 1483 UVM_UNLOCK_AND_WAIT(ptmp, 1484 &uobj->vmobjlock, 0, "uvn_get",0); 1485 simple_lock(&uobj->vmobjlock); 1486 continue; /* goto top of pps while loop */ 1487 } 1488 1489 /* 1490 * if we get here then the page has become resident 1491 * and unbusy between steps 1 and 2. we busy it 1492 * now (so we own it) and set pps[lcv] (so that we 1493 * exit the while loop). 1494 */ 1495 ptmp->flags |= PG_BUSY; 1496 UVM_PAGE_OWN(ptmp, "uvn_get2"); 1497 pps[lcv] = ptmp; 1498 } 1499 1500 /* 1501 * if we own the a valid page at the correct offset, pps[lcv] 1502 * will point to it. nothing more to do except go to the 1503 * next page. 1504 */ 1505 1506 if (pps[lcv]) 1507 continue; /* next lcv */ 1508 1509 /* 1510 * we have a "fake/busy/clean" page that we just allocated. do 1511 * I/O to fill it with valid data. note that object must be 1512 * locked going into uvn_io, but will be unlocked afterwards. 1513 */ 1514 1515 result = uvn_io((struct uvm_vnode *) uobj, &ptmp, 1, 1516 PGO_SYNCIO, UIO_READ); 1517 1518 /* 1519 * I/O done. object is unlocked (by uvn_io). because we used 1520 * syncio the result can not be PEND or AGAIN. we must relock 1521 * and check for errors. 1522 */ 1523 1524 /* lock object. check for errors. */ 1525 simple_lock(&uobj->vmobjlock); 1526 if (result != VM_PAGER_OK) { 1527 if (ptmp->flags & PG_WANTED) 1528 /* object lock still held */ 1529 thread_wakeup(ptmp); 1530 1531 ptmp->flags &= ~(PG_WANTED|PG_BUSY); 1532 UVM_PAGE_OWN(ptmp, NULL); 1533 uvm_lock_pageq(); 1534 uvm_pagefree(ptmp); 1535 uvm_unlock_pageq(); 1536 simple_unlock(&uobj->vmobjlock); 1537 return(result); 1538 } 1539 1540 /* 1541 * we got the page! clear the fake flag (indicates valid 1542 * data now in page) and plug into our result array. note 1543 * that page is still busy. 1544 * 1545 * it is the callers job to: 1546 * => check if the page is released 1547 * => unbusy the page 1548 * => activate the page 1549 */ 1550 1551 ptmp->flags &= ~PG_FAKE; /* data is valid ... */ 1552 pmap_clear_modify(PMAP_PGARG(ptmp)); /* ... and clean */ 1553 pps[lcv] = ptmp; 1554 1555 } /* lcv loop */ 1556 1557 /* 1558 * finally, unlock object and return. 1559 */ 1560 1561 simple_unlock(&uobj->vmobjlock); 1562 return (VM_PAGER_OK); 1563 } 1564 1565 /* 1566 * uvn_asyncget: start async I/O to bring pages into ram 1567 * 1568 * => caller must lock object(???XXX: see if this is best) 1569 * => could be called from uvn_get or a madvise() fault-ahead. 1570 * => if it fails, it doesn't matter. 1571 */ 1572 1573 static int 1574 uvn_asyncget(uobj, offset, npages) 1575 struct uvm_object *uobj; 1576 vaddr_t offset; 1577 int npages; 1578 { 1579 1580 /* 1581 * XXXCDC: we can't do async I/O yet 1582 */ 1583 printf("uvn_asyncget called\n"); 1584 return (KERN_SUCCESS); 1585 } 1586 1587 /* 1588 * uvn_io: do I/O to a vnode 1589 * 1590 * => prefer map unlocked (not required) 1591 * => object must be locked! we will _unlock_ it before starting I/O. 1592 * => flags: PGO_SYNCIO -- use sync. I/O 1593 * => XXX: currently we use VOP_READ/VOP_WRITE which are only sync. 1594 * [thus we never do async i/o! see iodone comment] 1595 */ 1596 1597 static int 1598 uvn_io(uvn, pps, npages, flags, rw) 1599 struct uvm_vnode *uvn; 1600 vm_page_t *pps; 1601 int npages, flags, rw; 1602 { 1603 struct vnode *vn; 1604 struct uio uio; 1605 struct iovec iov; 1606 vaddr_t kva, file_offset; 1607 int waitf, result, got, wanted; 1608 UVMHIST_FUNC("uvn_io"); UVMHIST_CALLED(maphist); 1609 1610 UVMHIST_LOG(maphist, "rw=%d", rw,0,0,0); 1611 1612 /* 1613 * init values 1614 */ 1615 1616 waitf = (flags & PGO_SYNCIO) ? M_WAITOK : M_NOWAIT; 1617 vn = (struct vnode *) uvn; 1618 file_offset = pps[0]->offset; 1619 1620 /* 1621 * check for sync'ing I/O. 1622 */ 1623 1624 while (uvn->u_flags & UVM_VNODE_IOSYNC) { 1625 if (waitf == M_NOWAIT) { 1626 simple_unlock(&uvn->u_obj.vmobjlock); 1627 UVMHIST_LOG(maphist,"<- try again (iosync)",0,0,0,0); 1628 return(VM_PAGER_AGAIN); 1629 } 1630 uvn->u_flags |= UVM_VNODE_IOSYNCWANTED; 1631 UVM_UNLOCK_AND_WAIT(&uvn->u_flags, &uvn->u_obj.vmobjlock, 1632 FALSE, "uvn_iosync",0); 1633 simple_lock(&uvn->u_obj.vmobjlock); 1634 } 1635 1636 /* 1637 * check size 1638 */ 1639 1640 if (file_offset >= uvn->u_size) { 1641 simple_unlock(&uvn->u_obj.vmobjlock); 1642 UVMHIST_LOG(maphist,"<- BAD (size check)",0,0,0,0); 1643 #ifdef DIAGNOSTIC 1644 printf("uvn_io: note: size check fired\n"); 1645 #endif 1646 return(VM_PAGER_BAD); 1647 } 1648 1649 /* 1650 * first try and map the pages in (without waiting) 1651 */ 1652 1653 kva = uvm_pagermapin(pps, npages, NULL, M_NOWAIT); 1654 if (kva == NULL && waitf == M_NOWAIT) { 1655 simple_unlock(&uvn->u_obj.vmobjlock); 1656 UVMHIST_LOG(maphist,"<- mapin failed (try again)",0,0,0,0); 1657 return(VM_PAGER_AGAIN); 1658 } 1659 1660 /* 1661 * ok, now bump u_nio up. at this point we are done with uvn 1662 * and can unlock it. if we still don't have a kva, try again 1663 * (this time with sleep ok). 1664 */ 1665 1666 uvn->u_nio++; /* we have an I/O in progress! */ 1667 simple_unlock(&uvn->u_obj.vmobjlock); 1668 /* NOTE: object now unlocked */ 1669 if (kva == NULL) { 1670 kva = uvm_pagermapin(pps, npages, NULL, M_WAITOK); 1671 } 1672 1673 /* 1674 * ok, mapped in. our pages are PG_BUSY so they are not going to 1675 * get touched (so we can look at "offset" without having to lock 1676 * the object). set up for I/O. 1677 */ 1678 1679 /* 1680 * fill out uio/iov 1681 */ 1682 1683 iov.iov_base = (caddr_t) kva; 1684 wanted = npages << PAGE_SHIFT; 1685 if (file_offset + wanted > uvn->u_size) 1686 wanted = uvn->u_size - file_offset; /* XXX: needed? */ 1687 iov.iov_len = wanted; 1688 uio.uio_iov = &iov; 1689 uio.uio_iovcnt = 1; 1690 uio.uio_offset = file_offset; 1691 uio.uio_segflg = UIO_SYSSPACE; 1692 uio.uio_rw = rw; 1693 uio.uio_resid = wanted; 1694 uio.uio_procp = NULL; 1695 1696 /* 1697 * do the I/O! (XXX: curproc?) 1698 */ 1699 1700 UVMHIST_LOG(maphist, "calling VOP",0,0,0,0); 1701 1702 if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0) 1703 vn_lock(vn, LK_EXCLUSIVE | LK_RETRY, curproc /*XXX*/); 1704 /* NOTE: vnode now locked! */ 1705 1706 if (rw == UIO_READ) 1707 result = VOP_READ(vn, &uio, 0, curproc->p_ucred); 1708 else 1709 result = VOP_WRITE(vn, &uio, 0, curproc->p_ucred); 1710 1711 if ((uvn->u_flags & UVM_VNODE_VNISLOCKED) == 0) 1712 VOP_UNLOCK(vn, 0, curproc /*XXX*/); 1713 /* NOTE: vnode now unlocked (unless vnislocked) */ 1714 1715 UVMHIST_LOG(maphist, "done calling VOP",0,0,0,0); 1716 1717 /* 1718 * result == unix style errno (0 == OK!) 1719 * 1720 * zero out rest of buffer (if needed) 1721 */ 1722 1723 if (result == 0) { 1724 got = wanted - uio.uio_resid; 1725 1726 if (wanted && got == 0) { 1727 result = EIO; /* XXX: error? */ 1728 } else if (got < PAGE_SIZE * npages && rw == UIO_READ) { 1729 bzero((void *) (kva + got), 1730 (npages << PAGE_SHIFT) - got); 1731 } 1732 } 1733 1734 /* 1735 * now remove pager mapping 1736 */ 1737 uvm_pagermapout(kva, npages); 1738 1739 /* 1740 * now clean up the object (i.e. drop I/O count) 1741 */ 1742 1743 simple_lock(&uvn->u_obj.vmobjlock); 1744 /* NOTE: object now locked! */ 1745 1746 uvn->u_nio--; /* I/O DONE! */ 1747 if ((uvn->u_flags & UVM_VNODE_IOSYNC) != 0 && uvn->u_nio == 0) { 1748 wakeup(&uvn->u_nio); 1749 } 1750 simple_unlock(&uvn->u_obj.vmobjlock); 1751 /* NOTE: object now unlocked! */ 1752 1753 /* 1754 * done! 1755 */ 1756 1757 UVMHIST_LOG(maphist, "<- done (result %d)", result,0,0,0); 1758 if (result == 0) 1759 return(VM_PAGER_OK); 1760 else 1761 return(VM_PAGER_ERROR); 1762 } 1763 1764 /* 1765 * uvm_vnp_uncache: disable "persisting" in a vnode... when last reference 1766 * is gone we will kill the object (flushing dirty pages back to the vnode 1767 * if needed). 1768 * 1769 * => returns TRUE if there was no uvm_object attached or if there was 1770 * one and we killed it [i.e. if there is no active uvn] 1771 * => called with the vnode VOP_LOCK'd [we will unlock it for I/O, if 1772 * needed] 1773 * 1774 * => XXX: given that we now kill uvn's when a vnode is recycled (without 1775 * having to hold a reference on the vnode) and given a working 1776 * uvm_vnp_sync(), how does that effect the need for this function? 1777 * [XXXCDC: seems like it can die?] 1778 * 1779 * => XXX: this function should DIE once we merge the VM and buffer 1780 * cache. 1781 * 1782 * research shows that this is called in the following places: 1783 * ext2fs_truncate, ffs_truncate, detrunc[msdosfs]: called when vnode 1784 * changes sizes 1785 * ext2fs_write, WRITE [ufs_readwrite], msdosfs_write: called when we 1786 * are written to 1787 * ex2fs_chmod, ufs_chmod: called if VTEXT vnode and the sticky bit 1788 * is off 1789 * ffs_realloccg: when we can't extend the current block and have 1790 * to allocate a new one we call this [XXX: why?] 1791 * nfsrv_rename, rename_files: called when the target filename is there 1792 * and we want to remove it 1793 * nfsrv_remove, sys_unlink: called on file we are removing 1794 * nfsrv_access: if VTEXT and we want WRITE access and we don't uncache 1795 * then return "text busy" 1796 * nfs_open: seems to uncache any file opened with nfs 1797 * vn_writechk: if VTEXT vnode and can't uncache return "text busy" 1798 */ 1799 1800 boolean_t 1801 uvm_vnp_uncache(vp) 1802 struct vnode *vp; 1803 { 1804 struct uvm_vnode *uvn = &vp->v_uvm; 1805 1806 /* 1807 * lock uvn part of the vnode and check to see if we need to do anything 1808 */ 1809 1810 simple_lock(&uvn->u_obj.vmobjlock); 1811 if ((uvn->u_flags & UVM_VNODE_VALID) == 0 || 1812 (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) { 1813 simple_unlock(&uvn->u_obj.vmobjlock); 1814 return(TRUE); 1815 } 1816 1817 /* 1818 * we have a valid, non-blocked uvn. clear persist flag. 1819 * if uvn is currently active we can return now. 1820 */ 1821 1822 uvn->u_flags &= ~UVM_VNODE_CANPERSIST; 1823 if (uvn->u_obj.uo_refs) { 1824 simple_unlock(&uvn->u_obj.vmobjlock); 1825 return(FALSE); 1826 } 1827 1828 /* 1829 * uvn is currently persisting! we have to gain a reference to 1830 * it so that we can call uvn_detach to kill the uvn. 1831 */ 1832 1833 VREF(vp); /* seems ok, even with VOP_LOCK */ 1834 uvn->u_obj.uo_refs++; /* value is now 1 */ 1835 simple_unlock(&uvn->u_obj.vmobjlock); 1836 1837 1838 #ifdef DEBUG 1839 /* 1840 * carry over sanity check from old vnode pager: the vnode should 1841 * be VOP_LOCK'd, and we confirm it here. 1842 */ 1843 if (!VOP_ISLOCKED(vp)) { 1844 boolean_t is_ok_anyway = FALSE; 1845 #ifdef NFS 1846 extern int (**nfsv2_vnodeop_p) __P((void *)); 1847 extern int (**spec_nfsv2nodeop_p) __P((void *)); 1848 extern int (**fifo_nfsv2nodeop_p) __P((void *)); 1849 1850 /* vnode is NOT VOP_LOCKed: some vnode types _never_ lock */ 1851 if (vp->v_op == nfsv2_vnodeop_p || 1852 vp->v_op == spec_nfsv2nodeop_p) { 1853 is_ok_anyway = TRUE; 1854 } 1855 if (vp->v_op == fifo_nfsv2nodeop_p) { 1856 is_ok_anyway = TRUE; 1857 } 1858 #endif /* NFS */ 1859 if (!is_ok_anyway) 1860 panic("uvm_vnp_uncache: vnode not locked!"); 1861 } 1862 #endif /* DEBUG */ 1863 1864 /* 1865 * now drop our reference to the vnode. if we have the sole 1866 * reference to the vnode then this will cause it to die [as we 1867 * just cleared the persist flag]. we have to unlock the vnode 1868 * while we are doing this as it may trigger I/O. 1869 * 1870 * XXX: it might be possible for uvn to get reclaimed while we are 1871 * unlocked causing us to return TRUE when we should not. we ignore 1872 * this as a false-positive return value doesn't hurt us. 1873 */ 1874 VOP_UNLOCK(vp, 0, curproc /*XXX*/); 1875 uvn_detach(&uvn->u_obj); 1876 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc/*XXX*/); 1877 1878 /* 1879 * and return... 1880 */ 1881 1882 return(TRUE); 1883 } 1884 1885 /* 1886 * uvm_vnp_setsize: grow or shrink a vnode uvn 1887 * 1888 * grow => just update size value 1889 * shrink => toss un-needed pages 1890 * 1891 * => we assume that the caller has a reference of some sort to the 1892 * vnode in question so that it will not be yanked out from under 1893 * us. 1894 * 1895 * called from: 1896 * => truncate fns (ext2fs_truncate, ffs_truncate, detrunc[msdos]) 1897 * => "write" fns (ext2fs_write, WRITE [ufs/ufs], msdosfs_write, nfs_write) 1898 * => ffs_balloc [XXX: why? doesn't WRITE handle?] 1899 * => NFS: nfs_loadattrcache, nfs_getattrcache, nfs_setattr 1900 * => union fs: union_newsize 1901 */ 1902 1903 void 1904 uvm_vnp_setsize(vp, newsize) 1905 struct vnode *vp; 1906 u_quad_t newsize; 1907 { 1908 struct uvm_vnode *uvn = &vp->v_uvm; 1909 1910 /* 1911 * lock uvn and check for valid object, and if valid: do it! 1912 */ 1913 simple_lock(&uvn->u_obj.vmobjlock); 1914 if (uvn->u_flags & UVM_VNODE_VALID) { 1915 1916 /* 1917 * make sure that the newsize fits within a vaddr_t 1918 * XXX: need to revise addressing data types 1919 */ 1920 1921 if (newsize > (vaddr_t) -PAGE_SIZE) { 1922 #ifdef DEBUG 1923 printf("uvm_vnp_setsize: vn %p size truncated " 1924 "%qx->%lx\n", vp, newsize, (vaddr_t)-PAGE_SIZE); 1925 #endif 1926 newsize = (vaddr_t)-PAGE_SIZE; 1927 } 1928 1929 /* 1930 * now check if the size has changed: if we shrink we had better 1931 * toss some pages... 1932 */ 1933 1934 if (uvn->u_size > newsize) { 1935 (void)uvn_flush(&uvn->u_obj, (vaddr_t) newsize, 1936 uvn->u_size, PGO_FREE); 1937 } 1938 uvn->u_size = (vaddr_t)newsize; 1939 } 1940 simple_unlock(&uvn->u_obj.vmobjlock); 1941 1942 /* 1943 * done 1944 */ 1945 return; 1946 } 1947 1948 /* 1949 * uvm_vnp_sync: flush all dirty VM pages back to their backing vnodes. 1950 * 1951 * => called from sys_sync with no VM structures locked 1952 * => only one process can do a sync at a time (because the uvn 1953 * structure only has one queue for sync'ing). we ensure this 1954 * by holding the uvn_sync_lock while the sync is in progress. 1955 * other processes attempting a sync will sleep on this lock 1956 * until we are done. 1957 */ 1958 1959 void 1960 uvm_vnp_sync(mp) 1961 struct mount *mp; 1962 { 1963 struct uvm_vnode *uvn; 1964 struct vnode *vp; 1965 boolean_t got_lock; 1966 1967 /* 1968 * step 1: ensure we are only ones using the uvn_sync_q by locking 1969 * our lock... 1970 */ 1971 lockmgr(&uvn_sync_lock, LK_EXCLUSIVE, (void *)0, curproc /*XXX*/); 1972 1973 /* 1974 * step 2: build up a simpleq of uvns of interest based on the 1975 * write list. we gain a reference to uvns of interest. must 1976 * be careful about locking uvn's since we will be holding uvn_wl_lock 1977 * in the body of the loop. 1978 */ 1979 SIMPLEQ_INIT(&uvn_sync_q); 1980 simple_lock(&uvn_wl_lock); 1981 for (uvn = uvn_wlist.lh_first ; uvn != NULL ; 1982 uvn = uvn->u_wlist.le_next) { 1983 1984 vp = (struct vnode *) uvn; 1985 if (mp && vp->v_mount != mp) 1986 continue; 1987 1988 /* attempt to gain reference */ 1989 while ((got_lock = simple_lock_try(&uvn->u_obj.vmobjlock)) == 1990 FALSE && 1991 (uvn->u_flags & UVM_VNODE_BLOCKED) == 0) 1992 /* spin */ ; 1993 1994 /* 1995 * we will exit the loop if either if the following are true: 1996 * - we got the lock [always true if NCPU == 1] 1997 * - we failed to get the lock but noticed the vnode was 1998 * "blocked" -- in this case the vnode must be a dying 1999 * vnode, and since dying vnodes are in the process of 2000 * being flushed out, we can safely skip this one 2001 * 2002 * we want to skip over the vnode if we did not get the lock, 2003 * or if the vnode is already dying (due to the above logic). 2004 * 2005 * note that uvn must already be valid because we found it on 2006 * the wlist (this also means it can't be ALOCK'd). 2007 */ 2008 if (!got_lock || (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) { 2009 if (got_lock) 2010 simple_unlock(&uvn->u_obj.vmobjlock); 2011 continue; /* skip it */ 2012 } 2013 2014 /* 2015 * gain reference. watch out for persisting uvns (need to 2016 * regain vnode REF). 2017 */ 2018 if (uvn->u_obj.uo_refs == 0) 2019 VREF(vp); 2020 uvn->u_obj.uo_refs++; 2021 simple_unlock(&uvn->u_obj.vmobjlock); 2022 2023 /* 2024 * got it! 2025 */ 2026 SIMPLEQ_INSERT_HEAD(&uvn_sync_q, uvn, u_syncq); 2027 } 2028 simple_unlock(&uvn_wl_lock); 2029 2030 /* 2031 * step 3: we now have a list of uvn's that may need cleaning. 2032 * we are holding the uvn_sync_lock, but have dropped the uvn_wl_lock 2033 * (so we can now safely lock uvn's again). 2034 */ 2035 2036 for (uvn = uvn_sync_q.sqh_first ; uvn ; uvn = uvn->u_syncq.sqe_next) { 2037 simple_lock(&uvn->u_obj.vmobjlock); 2038 #ifdef DIAGNOSTIC 2039 if (uvn->u_flags & UVM_VNODE_DYING) { 2040 printf("uvm_vnp_sync: dying vnode on sync list\n"); 2041 } 2042 #endif 2043 uvn_flush(&uvn->u_obj, 0, 0, 2044 PGO_CLEANIT|PGO_ALLPAGES|PGO_DOACTCLUST); 2045 2046 /* 2047 * if we have the only reference and we just cleaned the uvn, 2048 * then we can pull it out of the UVM_VNODE_WRITEABLE state 2049 * thus allowing us to avoid thinking about flushing it again 2050 * on later sync ops. 2051 */ 2052 if (uvn->u_obj.uo_refs == 1 && 2053 (uvn->u_flags & UVM_VNODE_WRITEABLE)) { 2054 LIST_REMOVE(uvn, u_wlist); 2055 uvn->u_flags &= ~UVM_VNODE_WRITEABLE; 2056 } 2057 2058 simple_unlock(&uvn->u_obj.vmobjlock); 2059 2060 /* now drop our reference to the uvn */ 2061 uvn_detach(&uvn->u_obj); 2062 } 2063 2064 /* 2065 * done! release sync lock 2066 */ 2067 lockmgr(&uvn_sync_lock, LK_RELEASE, (void *)0, curproc /*XXX*/); 2068 } 2069