/*	$NetBSD: uvm_pager.c,v 1.91 2008/02/29 20:35:23 yamt Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.91 2008/02/29 20:35:23 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"
#include "opt_pagermap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

/*
 * XXX
 * this is needed until the device strategy interface
 * is changed to do physically-addressed i/o.
 */
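
/*
 * the sizes below can be overridden at build time; assuming the usual
 * defparam wiring for opt_pagermap.h in the config machinery, a kernel
 * config file could say, e.g.:
 *
 *	options 	PAGER_MAP_SIZE=0x2000000
 */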

#ifndef PAGER_MAP_DEFAULT_SIZE
#define PAGER_MAP_DEFAULT_SIZE	(16 * 1024 * 1024)
#endif

#ifndef PAGER_MAP_SIZE
#define PAGER_MAP_SIZE	PAGER_MAP_DEFAULT_SIZE
#endif

size_t pager_map_size = PAGER_MAP_SIZE;

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

const struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
kmutex_t pager_map_wanted_lock;
bool pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static bool emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, pager_map_size, 0,
	    false, NULL);
	mutex_init(&pager_map_wanted_lock, MUTEX_DEFAULT, IPL_NONE);
	pager_map_wanted = false;
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = false;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < __arraycount(uvmpagerops); lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}

/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	const bool pdaemon = curlwp == uvm.pagedaemon_lwp;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */
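
	/*
	 * try to reserve blank space in pager_map.  the page daemon
	 * never waits here: it maps with UVM_FLAG_NOWAIT and, on
	 * failure, falls back on the single emergency VA region below
	 * (presumably so that pageout can always make progress).
	 */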
	if (uvm_map(pager_map, &kva, size, NULL, UVM_UNKNOWN_OFFSET, 0,
	    UVM_FLAG_NOMERGE | (pdaemon ? UVM_FLAG_NOWAIT : 0)) != 0) {
		if (pdaemon) {
			mutex_enter(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, false,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = true;
			mutex_exit(&pager_map_wanted_lock);
			kva = emergva;
			/* The shift implicitly truncates to PAGE_SIZE */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		mutex_enter(&pager_map_wanted_lock);
		pager_map_wanted = true;
		UVMHIST_LOG(maphist, " SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, false,
		    "pager_map", 0);
		goto ReStart;
	}

enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our mappings by hand and then remove the map entry
 * (waking up anyone waiting for space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, size);
	if (kva == emergva) {
		mutex_enter(&pager_map_wanted_lock);
		emerginuse = false;
		wakeup(&emergva);
		mutex_exit(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	mutex_enter(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = false;
		wakeup(pager_map);
	}
	mutex_exit(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}

/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => the buffer is private so need not be locked here
 */

void
uvm_aio_biodone1(struct buf *bp)
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
	if (bp->b_error != 0) {
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	putiobuf(bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}
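
/*
 * illustrative sketch (hypothetical caller, not part of this file):
 * a nested i/o splits a master buf "mbp" into child bufs that each
 * point back at the master and finish through uvm_aio_biodone1():
 *
 *	bp = getiobuf(vp, true);
 *	bp->b_private = mbp;
 *	bp->b_iodone = uvm_aio_biodone1;
 *	... set up b_data/b_bcount/b_blkno for this chunk ...
 *	VOP_STRATEGY(vp, bp);
 *
 * the master's b_resid starts at the total byte count and each child
 * subtracts its b_bcount; biodone(mbp) fires when it reaches zero.
 */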

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	workqueue_enqueue(uvm.aiodone_queue, &bp->b_work, NULL);
}

void
uvm_aio_aiodone_pages(struct vm_page **pgs, int npages, bool write, int error)
{
	struct uvm_object *uobj;
	struct vm_page *pg;
	kmutex_t *slock;
	int pageout_done;
	int swslot;
	int i;
	bool swap;

	swslot = 0;
	pageout_done = 0;
	slock = NULL;
	uobj = NULL;
	pg = pgs[0];
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
	    (pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		mutex_enter(slock);
		mutex_enter(&uvm_pageqlock);
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				KASSERT(pg->uanon != NULL);
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			mutex_enter(slock);
			mutex_enter(&uvm_pageqlock);
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					pageout_done++;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
					    pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
					    swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}
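
		/*
		 * note on the swap-slot bookkeeping above: a multi-page
		 * swap i/o uses consecutive slots, so page i of this
		 * cluster lives at swslot + i.  on error that slot is
		 * rewritten: 0 for ENOMEM (the page was re-dirtied and
		 * will be paged out again later), SWSLOT_BAD for
		 * permanent failures.
		 */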

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->pqflags |= PQ_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			KASSERT((pg->flags & PG_CLEAN) != 0);
			uvm_pageenqueue(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			pageout_done++;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				mutex_exit(&uvm_pageqlock);
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				mutex_exit(&uvm_pageqlock);
				mutex_exit(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	uvm_pageout_done(pageout_done);
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		mutex_exit(&uvm_swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
}

/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
	int i, error;
	bool write;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = bp->b_error;
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC BC_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_cflags & BC_NOCACHE) && bioopsp != NULL)
		(*bioopsp->io_pageiodone)(bp);

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	uvm_aio_aiodone_pages(pgs, npages, write, error);

	if (write && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}

/*
 * uvm_pageratop: convert KVAs in the pager map back to their page
 * structures.
 */

struct vm_page *
uvm_pageratop(vaddr_t kva)
{
	struct vm_page *pg;
	paddr_t pa;
	bool rv;

	rv = pmap_extract(pmap_kernel(), kva, &pa);
	KASSERT(rv);
	pg = PHYS_TO_VM_PAGE(pa);
	KASSERT(pg != NULL);
	return (pg);
}
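
/*
 * illustrative sketch (hypothetical caller, not part of this file):
 * an async pager write ties the pieces above together roughly as:
 *
 *	kva = uvm_pagermapin(pgs, npages, UVMPAGER_MAPIN_WAITOK);
 *	bp->b_data = (void *)kva;
 *	bp->b_iodone = uvm_aio_biodone;
 *	VOP_STRATEGY(vp, bp);
 *
 * biodone() later calls uvm_aio_biodone() in interrupt context, which
 * punts the buf to the aiodone workqueue; uvm_aio_aiodone() then runs
 * in thread context, recovers the pages with uvm_pageratop(), unmaps
 * them with uvm_pagermapout(), and disposes of them in
 * uvm_aio_aiodone_pages().
 */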