/*
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vnode_pager.c	8.5 (Berkeley) 01/12/94
 */

/*
 * Page to/from files (vnodes).
 *
 * TODO:
 *	pageouts
 *	fix credential use (uses current process credentials now)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/mount.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vnode_pager.h>

struct pagerlst	vnode_pager_list;	/* list of managed vnodes */

#ifdef DEBUG
int	vpagerdebug = 0x00;
#define	VDB_FOLLOW	0x01
#define	VDB_INIT	0x02
#define	VDB_IO		0x04
#define	VDB_FAIL	0x08
#define	VDB_ALLOC	0x10
#define	VDB_SIZE	0x20
#endif

static vm_pager_t	vnode_pager_alloc
			    __P((caddr_t, vm_size_t, vm_prot_t, vm_offset_t));
static void		vnode_pager_cluster
			    __P((vm_pager_t, vm_offset_t,
				 vm_offset_t *, vm_offset_t *));
static void		vnode_pager_dealloc __P((vm_pager_t));
static int		vnode_pager_getpage
			    __P((vm_pager_t, vm_page_t *, int, boolean_t));
static boolean_t	vnode_pager_haspage __P((vm_pager_t, vm_offset_t));
static void		vnode_pager_init __P((void));
static int		vnode_pager_io
			    __P((vn_pager_t, vm_page_t *, int,
				 boolean_t, enum uio_rw));
static boolean_t	vnode_pager_putpage
			    __P((vm_pager_t, vm_page_t *, int, boolean_t));

struct pagerops vnodepagerops = {
	vnode_pager_init,
	vnode_pager_alloc,
	vnode_pager_dealloc,
	vnode_pager_getpage,
	vnode_pager_putpage,
	vnode_pager_haspage,
	vnode_pager_cluster
};

static void
vnode_pager_init()
{
#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_init()\n");
#endif
	TAILQ_INIT(&vnode_pager_list);
}

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
static vm_pager_t
vnode_pager_alloc(handle, size, prot, foff)
	caddr_t handle;
	vm_size_t size;
	vm_prot_t prot;
	vm_offset_t foff;
{
	register vm_pager_t pager;
	register vn_pager_t vnp;
	vm_object_t object;
	struct vattr vattr;
	struct vnode *vp;
	struct proc *p = curproc;	/* XXX */

#ifdef DEBUG
	if (vpagerdebug & (VDB_FOLLOW|VDB_ALLOC))
		printf("vnode_pager_alloc(%x, %x, %x)\n", handle, size, prot);
#endif
	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return(NULL);

	/*
	 * Vnodes keep a pointer to any associated pager so no need to
	 * lookup with vm_pager_lookup.
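	 * The pointer is cached in vp->v_vmdata below, so repeated
	 * mappings of the same vnode share a single pager and VM object.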
	 */
	vp = (struct vnode *)handle;
	pager = (vm_pager_t)vp->v_vmdata;
	if (pager == NULL) {
		/*
		 * Allocate pager structures
		 */
		pager = (vm_pager_t)malloc(sizeof *pager, M_VMPAGER, M_WAITOK);
		if (pager == NULL)
			return(NULL);
		vnp = (vn_pager_t)malloc(sizeof *vnp, M_VMPGDATA, M_WAITOK);
		if (vnp == NULL) {
			free((caddr_t)pager, M_VMPAGER);
			return(NULL);
		}
		/*
		 * And an object of the appropriate size
		 */
		if (VOP_GETATTR(vp, &vattr, p->p_ucred, p) == 0) {
			object = vm_object_allocate(round_page(vattr.va_size));
			vm_object_enter(object, pager);
			vm_object_setpager(object, pager, 0, TRUE);
		} else {
			free((caddr_t)vnp, M_VMPGDATA);
			free((caddr_t)pager, M_VMPAGER);
			return(NULL);
		}
		/*
		 * Hold a reference to the vnode and initialize pager data.
		 */
		VREF(vp);
		vnp->vnp_flags = 0;
		vnp->vnp_vp = vp;
		vnp->vnp_size = vattr.va_size;
		TAILQ_INSERT_TAIL(&vnode_pager_list, pager, pg_list);
		pager->pg_handle = handle;
		pager->pg_type = PG_VNODE;
		pager->pg_flags = 0;
		pager->pg_ops = &vnodepagerops;
		pager->pg_data = vnp;
		vp->v_vmdata = (caddr_t)pager;
	} else {
		/*
		 * vm_object_lookup() will remove the object from the
		 * cache if found and also gain a reference to the object.
		 */
		object = vm_object_lookup(pager);
#ifdef DEBUG
		vnp = (vn_pager_t)pager->pg_data;
#endif
	}
#ifdef DEBUG
	if (vpagerdebug & VDB_ALLOC)
		printf("vnode_pager_alloc: vp %x sz %x pager %x object %x\n",
		       vp, vnp->vnp_size, pager, object);
#endif
	return(pager);
}

static void
vnode_pager_dealloc(pager)
	vm_pager_t pager;
{
	register vn_pager_t vnp = (vn_pager_t)pager->pg_data;
	register struct vnode *vp;
	struct proc *p = curproc;	/* XXX */

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_dealloc(%x)\n", pager);
#endif
	if (vp = vnp->vnp_vp) {
		vp->v_vmdata = NULL;
		vp->v_flag &= ~VTEXT;
#if 0
		/* can hang if done at reboot on NFS FS */
		(void) VOP_FSYNC(vp, p->p_ucred, p);
#endif
		vrele(vp);
	}
	TAILQ_REMOVE(&vnode_pager_list, pager, pg_list);
	free((caddr_t)vnp, M_VMPGDATA);
	free((caddr_t)pager, M_VMPAGER);
}

static int
vnode_pager_getpage(pager, mlist, npages, sync)
	vm_pager_t pager;
	vm_page_t *mlist;
	int npages;
	boolean_t sync;
{

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_getpage(%x, %x, %x, %x)\n",
		       pager, mlist, npages, sync);
#endif
	return(vnode_pager_io((vn_pager_t)pager->pg_data,
			      mlist, npages, sync, UIO_READ));
}

static boolean_t
vnode_pager_putpage(pager, mlist, npages, sync)
	vm_pager_t pager;
	vm_page_t *mlist;
	int npages;
	boolean_t sync;
{
	int err;

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_putpage(%x, %x, %x, %x)\n",
		       pager, mlist, npages, sync);
#endif
	if (pager == NULL)
		return (FALSE);		/* ??? */
	err = vnode_pager_io((vn_pager_t)pager->pg_data,
			     mlist, npages, sync, UIO_WRITE);
	/*
	 * If the operation was successful, mark the pages clean.
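	 * Setting PG_CLEAN and clearing the pmap modify bit records that
	 * the in-core copy now matches the file, so the pageout daemon
	 * will not write these pages out again until they are re-dirtied.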
	 */
	if (err == VM_PAGER_OK) {
		while (npages--) {
			(*mlist)->flags |= PG_CLEAN;
			pmap_clear_modify(VM_PAGE_TO_PHYS(*mlist));
			mlist++;
		}
	}
	return(err);
}

static boolean_t
vnode_pager_haspage(pager, offset)
	vm_pager_t pager;
	vm_offset_t offset;
{
	register vn_pager_t vnp = (vn_pager_t)pager->pg_data;
	daddr_t bn;
	int err;

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_haspage(%x, %x)\n", pager, offset);
#endif

	/*
	 * Offset beyond end of file, do not have the page
	 */
	if (offset >= vnp->vnp_size) {
#ifdef DEBUG
		if (vpagerdebug & (VDB_FAIL|VDB_SIZE))
			printf("vnode_pager_haspage: pg %x, off %x, size %x\n",
			       pager, offset, vnp->vnp_size);
#endif
		return(FALSE);
	}

	/*
	 * Read the index to find the disk block to read
	 * from.  If there is no block, report that we don't
	 * have this data.
	 *
	 * Assumes that the vnode has the whole page or nothing.
	 */
	VOP_LOCK(vnp->vnp_vp);
	err = VOP_BMAP(vnp->vnp_vp,
		       offset / vnp->vnp_vp->v_mount->mnt_stat.f_iosize,
		       (struct vnode **)0, &bn, NULL);
	VOP_UNLOCK(vnp->vnp_vp);
	if (err) {
#ifdef DEBUG
		if (vpagerdebug & VDB_FAIL)
			printf("vnode_pager_haspage: BMAP err %d, pg %x, off %x\n",
			       err, pager, offset);
#endif
		return(TRUE);
	}
	return((long)bn < 0 ? FALSE : TRUE);
}

static void
vnode_pager_cluster(pager, offset, loffset, hoffset)
	vm_pager_t pager;
	vm_offset_t offset;
	vm_offset_t *loffset;
	vm_offset_t *hoffset;
{
	vn_pager_t vnp = (vn_pager_t)pager->pg_data;
	vm_offset_t loff, hoff;

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_cluster(%x, %x) ", pager, offset);
#endif
	loff = offset;
	if (loff >= vnp->vnp_size)
		panic("vnode_pager_cluster: bad offset");
	/*
	 * XXX could use VOP_BMAP to get maxcontig value
	 */
	hoff = loff + MAXBSIZE;
	if (hoff > round_page(vnp->vnp_size))
		hoff = round_page(vnp->vnp_size);

	*loffset = loff;
	*hoffset = hoff;
#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("returns [%x-%x]\n", loff, hoff);
#endif
}

/*
 * (XXX)
 * Lets the VM system know about a change in size for a file.
 * If this vnode is mapped into some address space (i.e. we have a pager
 * for it) we adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(vp, nsize)
	struct vnode *vp;
	u_long nsize;
{
	register vn_pager_t vnp;
	register vm_object_t object;
	vm_pager_t pager;

	/*
	 * Not a mapped vnode
	 */
	if (vp == NULL || vp->v_type != VREG || vp->v_vmdata == NULL)
		return;
	/*
	 * Hasn't changed size
	 */
	pager = (vm_pager_t)vp->v_vmdata;
	vnp = (vn_pager_t)pager->pg_data;
	if (nsize == vnp->vnp_size)
		return;
	/*
	 * No object.
	 * This can happen during object termination since
	 * vm_object_page_clean is called after the object
	 * has been removed from the hash table, and clean
	 * may cause vnode write operations which can wind
	 * up back here.
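	 * In that case there is nothing left to flush; just return.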
	 */
	object = vm_object_lookup(pager);
	if (object == NULL)
		return;

#ifdef DEBUG
	if (vpagerdebug & (VDB_FOLLOW|VDB_SIZE))
		printf("vnode_pager_setsize: vp %x obj %x osz %d nsz %d\n",
		       vp, object, vnp->vnp_size, nsize);
#endif
	/*
	 * File has shrunk.
	 * Toss any cached pages beyond the new EOF.
	 */
	if (nsize < vnp->vnp_size) {
		vm_object_lock(object);
		vm_object_page_remove(object,
				      (vm_offset_t)nsize, vnp->vnp_size);
		vm_object_unlock(object);
	}
	vnp->vnp_size = (vm_offset_t)nsize;
	vm_object_deallocate(object);
}

void
vnode_pager_umount(mp)
	register struct mount *mp;
{
	register vm_pager_t pager, npager;
	struct vnode *vp;

	for (pager = vnode_pager_list.tqh_first; pager != NULL;
	     pager = npager) {
		/*
		 * Save the next pointer now since uncaching may
		 * terminate the object and render the pager invalid.
		 */
		npager = pager->pg_list.tqe_next;
		vp = ((vn_pager_t)pager->pg_data)->vnp_vp;
		if (mp == (struct mount *)0 || vp->v_mount == mp) {
			VOP_LOCK(vp);
			(void) vnode_pager_uncache(vp);
			VOP_UNLOCK(vp);
		}
	}
}

/*
 * Remove the vnode-associated object from the object cache.
 *
 * XXX unlock the vnode if it is currently locked.
 * We must do this since uncaching the object may result in its
 * destruction, which may initiate paging activity that requires
 * re-locking the vnode.
 */
boolean_t
vnode_pager_uncache(vp)
	register struct vnode *vp;
{
	register vm_object_t object;
	boolean_t uncached;
	vm_pager_t pager;

	/*
	 * Not a mapped vnode
	 */
	pager = (vm_pager_t)vp->v_vmdata;
	if (pager == NULL)
		return (TRUE);
#ifdef DEBUG
	if (!VOP_ISLOCKED(vp)) {
		extern int (**nfsv2_vnodeop_p)();

		if (vp->v_op != nfsv2_vnodeop_p)
			panic("vnode_pager_uncache: vnode not locked!");
	}
#endif
	/*
	 * Must use vm_object_lookup() as it actually removes
	 * the object from the cache list.
	 */
	object = vm_object_lookup(pager);
	if (object) {
		uncached = (object->ref_count <= 1);
		VOP_UNLOCK(vp);
		pager_cache(object, FALSE);
		VOP_LOCK(vp);
	} else
		uncached = TRUE;
	return(uncached);
}

static int
vnode_pager_io(vnp, mlist, npages, sync, rw)
	register vn_pager_t vnp;
	vm_page_t *mlist;
	int npages;
	boolean_t sync;
	enum uio_rw rw;
{
	struct uio auio;
	struct iovec aiov;
	vm_offset_t kva, foff;
	int error, size;
	struct proc *p = curproc;	/* XXX */

	/* XXX */
	vm_page_t m;
	if (npages != 1)
		panic("vnode_pager_io: cannot handle multiple pages");
	m = *mlist;
	/* XXX */

#ifdef DEBUG
	if (vpagerdebug & VDB_FOLLOW)
		printf("vnode_pager_io(%x, %x, %c): vnode %x\n",
		       vnp, m, rw == UIO_READ ? 'R' : 'W', vnp->vnp_vp);
#endif
	foff = m->offset + m->object->paging_offset;
	/*
	 * Return failure if beyond current EOF
	 */
	if (foff >= vnp->vnp_size) {
#ifdef DEBUG
		if (vpagerdebug & VDB_SIZE)
			printf("vnode_pager_io: vp %x, off %d size %d\n",
			       vnp->vnp_vp, foff, vnp->vnp_size);
#endif
		return(VM_PAGER_BAD);
	}
	if (foff + PAGE_SIZE > vnp->vnp_size)
		size = vnp->vnp_size - foff;
	else
		size = PAGE_SIZE;
	/*
	 * Allocate a kernel virtual address and initialize so that
	 * we can use VOP_READ/WRITE routines.
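	 * The pages are mapped at a kernel virtual address and the
	 * transfer is done as an ordinary VOP_READ/VOP_WRITE with a
	 * UIO_SYSSPACE uio describing that mapping.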
	 */
	kva = vm_pager_map_pages(mlist, npages, sync);
	if (kva == NULL)
		return(VM_PAGER_AGAIN);
	aiov.iov_base = (caddr_t)kva;
	aiov.iov_len = size;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = foff;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = rw;
	auio.uio_resid = size;
	auio.uio_procp = (struct proc *)0;
#ifdef DEBUG
	if (vpagerdebug & VDB_IO)
		printf("vnode_pager_io: vp %x kva %x foff %x size %x",
		       vnp->vnp_vp, kva, foff, size);
#endif
	VOP_LOCK(vnp->vnp_vp);
	if (rw == UIO_READ)
		error = VOP_READ(vnp->vnp_vp, &auio, 0, p->p_ucred);
	else
		error = VOP_WRITE(vnp->vnp_vp, &auio, 0, p->p_ucred);
	VOP_UNLOCK(vnp->vnp_vp);
#ifdef DEBUG
	if (vpagerdebug & VDB_IO) {
		if (error || auio.uio_resid)
			printf(" returns error %x, resid %x",
			       error, auio.uio_resid);
		printf("\n");
	}
#endif
	if (!error) {
		register int count = size - auio.uio_resid;

		/*
		 * A partial read of the last page is normal; zero-fill
		 * the remainder so no stale data is mapped past EOF.
		 */
		if (count == 0)
			error = EINVAL;
		else if (count != PAGE_SIZE && rw == UIO_READ)
			bzero((void *)(kva + count), PAGE_SIZE - count);
	}
	vm_pager_unmap_pages(kva, npages);
	return (error ? VM_PAGER_ERROR : VM_PAGER_OK);
}