1 /* $NetBSD: uvm_vnode.c,v 1.73 2006/09/15 15:51:13 yamt Exp $ */ 2 3 /* 4 * Copyright (c) 1997 Charles D. Cranor and Washington University. 5 * Copyright (c) 1991, 1993 6 * The Regents of the University of California. 7 * Copyright (c) 1990 University of Utah. 8 * 9 * All rights reserved. 10 * 11 * This code is derived from software contributed to Berkeley by 12 * the Systems Programming Group of the University of Utah Computer 13 * Science Department. 14 * 15 * Redistribution and use in source and binary forms, with or without 16 * modification, are permitted provided that the following conditions 17 * are met: 18 * 1. Redistributions of source code must retain the above copyright 19 * notice, this list of conditions and the following disclaimer. 20 * 2. Redistributions in binary form must reproduce the above copyright 21 * notice, this list of conditions and the following disclaimer in the 22 * documentation and/or other materials provided with the distribution. 23 * 3. All advertising materials mentioning features or use of this software 24 * must display the following acknowledgement: 25 * This product includes software developed by Charles D. Cranor, 26 * Washington University, the University of California, Berkeley and 27 * its contributors. 28 * 4. Neither the name of the University nor the names of its contributors 29 * may be used to endorse or promote products derived from this software 30 * without specific prior written permission. 31 * 32 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 33 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 34 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 35 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 36 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 40 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 41 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 42 * SUCH DAMAGE. 43 * 44 * @(#)vnode_pager.c 8.8 (Berkeley) 2/13/94 45 * from: Id: uvm_vnode.c,v 1.1.2.26 1998/02/02 20:38:07 chuck Exp 46 */ 47 48 /* 49 * uvm_vnode.c: the vnode pager. 50 */ 51 52 #include <sys/cdefs.h> 53 __KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.73 2006/09/15 15:51:13 yamt Exp $"); 54 55 #include "fs_nfs.h" 56 #include "opt_uvmhist.h" 57 #include "opt_ddb.h" 58 59 #include <sys/param.h> 60 #include <sys/systm.h> 61 #include <sys/kernel.h> 62 #include <sys/proc.h> 63 #include <sys/malloc.h> 64 #include <sys/vnode.h> 65 #include <sys/disklabel.h> 66 #include <sys/ioctl.h> 67 #include <sys/fcntl.h> 68 #include <sys/conf.h> 69 #include <sys/pool.h> 70 #include <sys/mount.h> 71 72 #include <miscfs/specfs/specdev.h> 73 74 #include <uvm/uvm.h> 75 #include <uvm/uvm_readahead.h> 76 77 /* 78 * functions 79 */ 80 81 static void uvn_detach(struct uvm_object *); 82 static int uvn_get(struct uvm_object *, voff_t, struct vm_page **, int *, 83 int, vm_prot_t, int, int); 84 static int uvn_put(struct uvm_object *, voff_t, voff_t, int); 85 static void uvn_reference(struct uvm_object *); 86 87 static int uvn_findpage(struct uvm_object *, voff_t, struct vm_page **, 88 int); 89 90 /* 91 * master pager structure 92 */ 93 94 struct uvm_pagerops uvm_vnodeops = { 95 NULL, 96 uvn_reference, 97 uvn_detach, 98 NULL, 99 uvn_get, 100 uvn_put, 101 }; 102 103 /* 104 * the ops! 105 */ 106 107 /* 108 * uvn_attach 109 * 110 * attach a vnode structure to a VM object. if the vnode is already 111 * attached, then just bump the reference count by one and return the 112 * VM object. if not already attached, attach and return the new VM obj. 113 * the "accessprot" tells the max access the attaching thread wants to 114 * our pages. 115 * 116 * => caller must _not_ already be holding the lock on the uvm_object. 117 * => in fact, nothing should be locked so that we can sleep here. 118 * => note that uvm_object is first thing in vnode structure, so their 119 * pointers are equiv. 120 */ 121 122 struct uvm_object * 123 uvn_attach(void *arg, vm_prot_t accessprot) 124 { 125 struct vnode *vp = arg; 126 struct uvm_object *uobj = &vp->v_uobj; 127 struct vattr vattr; 128 const struct bdevsw *bdev; 129 int result; 130 struct partinfo pi; 131 voff_t used_vnode_size; 132 UVMHIST_FUNC("uvn_attach"); UVMHIST_CALLED(maphist); 133 134 UVMHIST_LOG(maphist, "(vn=0x%x)", arg,0,0,0); 135 used_vnode_size = (voff_t)0; 136 137 /* 138 * first get a lock on the uobj. 139 */ 140 141 simple_lock(&uobj->vmobjlock); 142 while (vp->v_flag & VXLOCK) { 143 vp->v_flag |= VXWANT; 144 UVMHIST_LOG(maphist, " SLEEPING on blocked vn",0,0,0,0); 145 UVM_UNLOCK_AND_WAIT(uobj, &uobj->vmobjlock, FALSE, 146 "uvn_attach", 0); 147 simple_lock(&uobj->vmobjlock); 148 UVMHIST_LOG(maphist," WOKE UP",0,0,0,0); 149 } 150 151 /* 152 * if we're mapping a BLK device, make sure it is a disk. 153 */ 154 if (vp->v_type == VBLK) { 155 bdev = bdevsw_lookup(vp->v_rdev); 156 if (bdev == NULL || bdev->d_type != D_DISK) { 157 simple_unlock(&uobj->vmobjlock); 158 UVMHIST_LOG(maphist,"<- done (VBLK not D_DISK!)", 159 0,0,0,0); 160 return(NULL); 161 } 162 } 163 KASSERT(vp->v_type == VREG || vp->v_type == VBLK); 164 165 /* 166 * set up our idea of the size 167 * if this hasn't been done already. 168 */ 169 if (vp->v_size == VSIZENOTSET) { 170 171 172 vp->v_flag |= VXLOCK; 173 simple_unlock(&uobj->vmobjlock); /* drop lock in case we sleep */ 174 /* XXX: curproc? */ 175 if (vp->v_type == VBLK) { 176 /* 177 * We could implement this as a specfs getattr call, but: 178 * 179 * (1) VOP_GETATTR() would get the file system 180 * vnode operation, not the specfs operation. 181 * 182 * (2) All we want is the size, anyhow. 183 */ 184 bdev = bdevsw_lookup(vp->v_rdev); 185 if (bdev != NULL) { 186 result = (*bdev->d_ioctl)(vp->v_rdev, DIOCGPART, 187 (caddr_t)&pi, FREAD, curlwp); 188 } else { 189 result = ENXIO; 190 } 191 if (result == 0) { 192 /* XXX should remember blocksize */ 193 used_vnode_size = (voff_t)pi.disklab->d_secsize * 194 (voff_t)pi.part->p_size; 195 } 196 } else { 197 result = VOP_GETATTR(vp, &vattr, curlwp->l_cred, curlwp); 198 if (result == 0) 199 used_vnode_size = vattr.va_size; 200 } 201 202 /* relock object */ 203 simple_lock(&uobj->vmobjlock); 204 205 if (vp->v_flag & VXWANT) { 206 wakeup(vp); 207 } 208 vp->v_flag &= ~(VXLOCK|VXWANT); 209 210 if (result != 0) { 211 simple_unlock(&uobj->vmobjlock); 212 UVMHIST_LOG(maphist,"<- done (VOP_GETATTR FAILED!)", 0,0,0,0); 213 return(NULL); 214 } 215 vp->v_size = used_vnode_size; 216 217 } 218 219 simple_unlock(&uobj->vmobjlock); 220 UVMHIST_LOG(maphist,"<- done, refcnt=%d", vp->v_usecount, 221 0, 0, 0); 222 return uobj; 223 } 224 225 226 /* 227 * uvn_reference 228 * 229 * duplicate a reference to a VM object. Note that the reference 230 * count must already be at least one (the passed in reference) so 231 * there is no chance of the uvn being killed or locked out here. 232 * 233 * => caller must call with object unlocked. 234 * => caller must be using the same accessprot as was used at attach time 235 */ 236 237 static void 238 uvn_reference(struct uvm_object *uobj) 239 { 240 VREF((struct vnode *)uobj); 241 } 242 243 244 /* 245 * uvn_detach 246 * 247 * remove a reference to a VM object. 248 * 249 * => caller must call with object unlocked and map locked. 250 */ 251 252 static void 253 uvn_detach(struct uvm_object *uobj) 254 { 255 vrele((struct vnode *)uobj); 256 } 257 258 /* 259 * uvn_put: flush page data to backing store. 260 * 261 * => object must be locked on entry! VOP_PUTPAGES must unlock it. 262 * => flags: PGO_SYNCIO -- use sync. I/O 263 * => note: caller must set PG_CLEAN and pmap_clear_modify (if needed) 264 */ 265 266 static int 267 uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags) 268 { 269 struct vnode *vp = (struct vnode *)uobj; 270 int error; 271 272 LOCK_ASSERT(simple_lock_held(&vp->v_interlock)); 273 error = VOP_PUTPAGES(vp, offlo, offhi, flags); 274 LOCK_ASSERT(!simple_lock_held(&vp->v_interlock)); 275 return error; 276 } 277 278 279 /* 280 * uvn_get: get pages (synchronously) from backing store 281 * 282 * => prefer map unlocked (not required) 283 * => object must be locked! we will _unlock_ it before starting any I/O. 284 * => flags: PGO_ALLPAGES: get all of the pages 285 * PGO_LOCKED: fault data structures are locked 286 * => NOTE: offset is the offset of pps[0], _NOT_ pps[centeridx] 287 * => NOTE: caller must check for released pages!! 288 */ 289 290 static int 291 uvn_get(struct uvm_object *uobj, voff_t offset, 292 struct vm_page **pps /* IN/OUT */, 293 int *npagesp /* IN (OUT if PGO_LOCKED)*/, 294 int centeridx, vm_prot_t access_type, int advice, int flags) 295 { 296 struct vnode *vp = (struct vnode *)uobj; 297 int error; 298 299 UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist); 300 301 UVMHIST_LOG(ubchist, "vp %p off 0x%x", vp, (int)offset, 0,0); 302 303 if ((access_type & VM_PROT_WRITE) == 0 && (flags & PGO_LOCKED) == 0) { 304 simple_unlock(&vp->v_interlock); 305 vn_ra_allocctx(vp); 306 uvm_ra_request(vp->v_ractx, advice, uobj, offset, 307 *npagesp << PAGE_SHIFT); 308 simple_lock(&vp->v_interlock); 309 } 310 311 error = VOP_GETPAGES(vp, offset, pps, npagesp, centeridx, 312 access_type, advice, flags); 313 314 return error; 315 } 316 317 318 /* 319 * uvn_findpages: 320 * return the page for the uobj and offset requested, allocating if needed. 321 * => uobj must be locked. 322 * => returned pages will be BUSY. 323 */ 324 325 int 326 uvn_findpages(struct uvm_object *uobj, voff_t offset, int *npagesp, 327 struct vm_page **pgs, int flags) 328 { 329 int i, count, found, npages, rv; 330 331 count = found = 0; 332 npages = *npagesp; 333 if (flags & UFP_BACKWARD) { 334 for (i = npages - 1; i >= 0; i--, offset -= PAGE_SIZE) { 335 rv = uvn_findpage(uobj, offset, &pgs[i], flags); 336 if (rv == 0) { 337 if (flags & UFP_DIRTYONLY) 338 break; 339 } else 340 found++; 341 count++; 342 } 343 } else { 344 for (i = 0; i < npages; i++, offset += PAGE_SIZE) { 345 rv = uvn_findpage(uobj, offset, &pgs[i], flags); 346 if (rv == 0) { 347 if (flags & UFP_DIRTYONLY) 348 break; 349 } else 350 found++; 351 count++; 352 } 353 } 354 *npagesp = count; 355 return (found); 356 } 357 358 static int 359 uvn_findpage(struct uvm_object *uobj, voff_t offset, struct vm_page **pgp, 360 int flags) 361 { 362 struct vm_page *pg; 363 boolean_t dirty; 364 UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist); 365 UVMHIST_LOG(ubchist, "vp %p off 0x%lx", uobj, offset,0,0); 366 367 if (*pgp != NULL) { 368 UVMHIST_LOG(ubchist, "dontcare", 0,0,0,0); 369 return 0; 370 } 371 for (;;) { 372 /* look for an existing page */ 373 pg = uvm_pagelookup(uobj, offset); 374 375 /* nope? allocate one now */ 376 if (pg == NULL) { 377 if (flags & UFP_NOALLOC) { 378 UVMHIST_LOG(ubchist, "noalloc", 0,0,0,0); 379 return 0; 380 } 381 pg = uvm_pagealloc(uobj, offset, NULL, 0); 382 if (pg == NULL) { 383 if (flags & UFP_NOWAIT) { 384 UVMHIST_LOG(ubchist, "nowait",0,0,0,0); 385 return 0; 386 } 387 simple_unlock(&uobj->vmobjlock); 388 uvm_wait("uvn_fp1"); 389 simple_lock(&uobj->vmobjlock); 390 continue; 391 } 392 UVMHIST_LOG(ubchist, "alloced %p", pg,0,0,0); 393 break; 394 } else if (flags & UFP_NOCACHE) { 395 UVMHIST_LOG(ubchist, "nocache",0,0,0,0); 396 return 0; 397 } 398 399 /* page is there, see if we need to wait on it */ 400 if ((pg->flags & PG_BUSY) != 0) { 401 if (flags & UFP_NOWAIT) { 402 UVMHIST_LOG(ubchist, "nowait",0,0,0,0); 403 return 0; 404 } 405 pg->flags |= PG_WANTED; 406 UVMHIST_LOG(ubchist, "wait %p", pg,0,0,0); 407 UVM_UNLOCK_AND_WAIT(pg, &uobj->vmobjlock, 0, 408 "uvn_fp2", 0); 409 simple_lock(&uobj->vmobjlock); 410 continue; 411 } 412 413 /* skip PG_RDONLY pages if requested */ 414 if ((flags & UFP_NORDONLY) && (pg->flags & PG_RDONLY)) { 415 UVMHIST_LOG(ubchist, "nordonly",0,0,0,0); 416 return 0; 417 } 418 419 /* stop on clean pages if requested */ 420 if (flags & UFP_DIRTYONLY) { 421 dirty = pmap_clear_modify(pg) || 422 (pg->flags & PG_CLEAN) == 0; 423 pg->flags |= PG_CLEAN; 424 if (!dirty) { 425 UVMHIST_LOG(ubchist, "dirtonly", 0,0,0,0); 426 return 0; 427 } 428 } 429 430 /* mark the page BUSY and we're done. */ 431 pg->flags |= PG_BUSY; 432 UVM_PAGE_OWN(pg, "uvn_findpage"); 433 UVMHIST_LOG(ubchist, "found %p", pg,0,0,0); 434 break; 435 } 436 *pgp = pg; 437 return 1; 438 } 439 440 /* 441 * uvm_vnp_setsize: grow or shrink a vnode uobj 442 * 443 * grow => just update size value 444 * shrink => toss un-needed pages 445 * 446 * => we assume that the caller has a reference of some sort to the 447 * vnode in question so that it will not be yanked out from under 448 * us. 449 */ 450 451 void 452 uvm_vnp_setsize(struct vnode *vp, voff_t newsize) 453 { 454 struct uvm_object *uobj = &vp->v_uobj; 455 voff_t pgend = round_page(newsize); 456 voff_t oldsize; 457 UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist); 458 459 simple_lock(&uobj->vmobjlock); 460 UVMHIST_LOG(ubchist, "vp %p old 0x%x new 0x%x", 461 vp, vp->v_size, newsize, 0); 462 463 /* 464 * now check if the size has changed: if we shrink we had better 465 * toss some pages... 466 */ 467 468 oldsize = vp->v_size; 469 vp->v_size = newsize; 470 if (oldsize > pgend && oldsize != VSIZENOTSET) { 471 (void) uvn_put(uobj, pgend, 0, PGO_FREE | PGO_SYNCIO); 472 } else { 473 simple_unlock(&uobj->vmobjlock); 474 } 475 } 476 477 /* 478 * uvm_vnp_zerorange: set a range of bytes in a file to zero. 479 */ 480 481 void 482 uvm_vnp_zerorange(struct vnode *vp, off_t off, size_t len) 483 { 484 void *win; 485 int flags; 486 487 /* 488 * XXXUBC invent kzero() and use it 489 */ 490 491 while (len) { 492 vsize_t bytelen = len; 493 494 win = ubc_alloc(&vp->v_uobj, off, &bytelen, UVM_ADV_NORMAL, 495 UBC_WRITE); 496 memset(win, 0, bytelen); 497 flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0; 498 ubc_release(win, flags); 499 500 off += bytelen; 501 len -= bytelen; 502 } 503 } 504