/*	$NetBSD: uvm_pager.c,v 1.74 2006/02/11 12:45:07 yamt Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * from: Id: uvm_pager.c,v 1.1.2.23 1998/02/02 20:38:06 chuck Exp
 */

/*
 * uvm_pager.c: generic functions used to assist the pagers.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.74 2006/02/11 12:45:07 yamt Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/vnode.h>

#include <uvm/uvm.h>

struct pool *uvm_aiobuf_pool;

/*
 * list of uvm pagers in the system
 */

struct uvm_pagerops * const uvmpagerops[] = {
	&aobj_pager,
	&uvm_deviceops,
	&uvm_vnodeops,
	&ubc_pager,
};

/*
 * the pager map: provides KVA for I/O
 */

struct vm_map *pager_map;		/* XXX */
struct simplelock pager_map_wanted_lock;
boolean_t pager_map_wanted;	/* locked by pager map */
static vaddr_t emergva;
static boolean_t emerginuse;

/*
 * uvm_pager_init: init pagers (at boot time)
 */

void
uvm_pager_init(void)
{
	u_int lcv;
	vaddr_t sva, eva;

	/*
	 * init pager map
	 */

	sva = 0;
	pager_map = uvm_km_suballoc(kernel_map, &sva, &eva, PAGER_MAP_SIZE, 0,
	    FALSE, NULL);
	simple_lock_init(&pager_map_wanted_lock);
	pager_map_wanted = FALSE;
	emergva = uvm_km_alloc(kernel_map, round_page(MAXPHYS), 0,
	    UVM_KMF_VAONLY);
#if defined(DEBUG)
	if (emergva == 0)
		panic("emergva");
#endif
	emerginuse = FALSE;

	/*
	 * init ASYNC I/O queue
	 */

	TAILQ_INIT(&uvm.aio_done);

	/*
	 * call pager init functions
	 */
	for (lcv = 0 ; lcv < sizeof(uvmpagerops)/sizeof(struct uvm_pagerops *);
	    lcv++) {
		if (uvmpagerops[lcv]->pgo_init)
			uvmpagerops[lcv]->pgo_init();
	}
}
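/*
 * illustration (not part of this file): a pager participates in the
 * pgo_init loop above by exporting a struct uvm_pagerops and being
 * listed in uvmpagerops[].  a minimal, hypothetical pager could look
 * roughly like this (field name per struct uvm_pagerops; all other
 * names are made up):
 *
 *	static void hypo_init(void) { ... one-time setup ... }
 *
 *	struct uvm_pagerops hypo_pager = {
 *		.pgo_init = hypo_init,
 *	};
 *
 * pgo_init is optional: uvm_pager_init() skips entries whose pgo_init
 * pointer is NULL.
 */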
/*
 * uvm_pagermapin: map pages into KVA (pager_map) for I/O that needs mappings
 *
 * we basically just map in a blank map entry to reserve the space in the
 * map and then use pmap_kenter_pa() to put the mappings in by hand.
 */

vaddr_t
uvm_pagermapin(struct vm_page **pps, int npages, int flags)
{
	vsize_t size;
	vaddr_t kva;
	vaddr_t cva;
	struct vm_page *pp;
	vm_prot_t prot;
	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist,"(pps=0x%x, npages=%d)", pps, npages,0,0);

	/*
	 * compute protection.  outgoing I/O only needs read
	 * access to the page, whereas incoming needs read/write.
	 */

	prot = VM_PROT_READ;
	if (flags & UVMPAGER_MAPIN_READ)
		prot |= VM_PROT_WRITE;

 ReStart:
	size = npages << PAGE_SHIFT;
	kva = 0;			/* let system choose VA */

	if (uvm_map(pager_map, &kva, size, NULL,
	    UVM_UNKNOWN_OFFSET, 0, UVM_FLAG_NOMERGE) != 0) {
		if (curproc == uvm.pagedaemon_proc) {
			simple_lock(&pager_map_wanted_lock);
			if (emerginuse) {
				UVM_UNLOCK_AND_WAIT(&emergva,
				    &pager_map_wanted_lock, FALSE,
				    "emergva", 0);
				goto ReStart;
			}
			emerginuse = TRUE;
			simple_unlock(&pager_map_wanted_lock);
			kva = emergva;
			/* the shift rounds MAXPHYS down to whole pages */
			KASSERT(npages <= (MAXPHYS >> PAGE_SHIFT));
			goto enter;
		}
		if ((flags & UVMPAGER_MAPIN_WAITOK) == 0) {
			UVMHIST_LOG(maphist,"<- NOWAIT failed", 0,0,0,0);
			return(0);
		}
		simple_lock(&pager_map_wanted_lock);
		pager_map_wanted = TRUE;
		UVMHIST_LOG(maphist, " SLEEPING on pager_map",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(pager_map, &pager_map_wanted_lock, FALSE,
		    "pager_map", 0);
		goto ReStart;
	}

 enter:
	/* got it */
	for (cva = kva ; size != 0 ; size -= PAGE_SIZE, cva += PAGE_SIZE) {
		pp = *pps++;
		KASSERT(pp);
		KASSERT(pp->flags & PG_BUSY);
		pmap_kenter_pa(cva, VM_PAGE_TO_PHYS(pp), prot);
	}
	pmap_update(vm_map_pmap(pager_map));

	UVMHIST_LOG(maphist, "<- done (KVA=0x%x)", kva,0,0,0);
	return(kva);
}

/*
 * uvm_pagermapout: remove pager_map mapping
 *
 * we remove our pmap mappings by hand and then remove the map entry
 * (waking up anyone waiting for space).
 */

void
uvm_pagermapout(vaddr_t kva, int npages)
{
	vsize_t size = npages << PAGE_SHIFT;
	struct vm_map_entry *entries;
	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, " (kva=0x%x, npages=%d)", kva, npages,0,0);

	/*
	 * duplicate uvm_unmap, but add in pager_map_wanted handling.
	 */

	pmap_kremove(kva, size);
	if (kva == emergva) {
		simple_lock(&pager_map_wanted_lock);
		emerginuse = FALSE;
		wakeup(&emergva);
		simple_unlock(&pager_map_wanted_lock);
		return;
	}

	vm_map_lock(pager_map);
	uvm_unmap_remove(pager_map, kva, kva + size, &entries, NULL, 0);
	simple_lock(&pager_map_wanted_lock);
	if (pager_map_wanted) {
		pager_map_wanted = FALSE;
		wakeup(pager_map);
	}
	simple_unlock(&pager_map_wanted_lock);
	vm_map_unlock(pager_map);
	if (entries)
		uvm_unmap_detach(entries, 0);
	pmap_update(pmap_kernel());
	UVMHIST_LOG(maphist,"<- done",0,0,0,0);
}
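/*
 * illustration (not part of this file): the expected calling pattern
 * for the two functions above.  "pgs" and "npages" are assumed to
 * describe PG_BUSY pages owned by the caller:
 *
 *	vaddr_t kva;
 *
 *	kva = uvm_pagermapin(pgs, npages,
 *	    UVMPAGER_MAPIN_WAITOK | UVMPAGER_MAPIN_READ);
 *	... the device fills the buffer at kva (a pagein) ...
 *	uvm_pagermapout(kva, npages);
 *
 * without UVMPAGER_MAPIN_WAITOK the call can return 0 when pager_map
 * is exhausted, and the caller must be prepared to retry or fail.
 */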
/*
 * interrupt-context iodone handler for nested i/o bufs.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone1(struct buf *bp)
{
	struct buf *mbp = bp->b_private;

	KASSERT(mbp != bp);
	if (bp->b_flags & B_ERROR) {
		mbp->b_flags |= B_ERROR;
		mbp->b_error = bp->b_error;
	}
	mbp->b_resid -= bp->b_bcount;
	putiobuf(bp);
	if (mbp->b_resid == 0) {
		biodone(mbp);
	}
}

/*
 * interrupt-context iodone handler for single-buf i/os
 * or the top-level buf of a nested-buf i/o.
 *
 * => must be at splbio().
 */

void
uvm_aio_biodone(struct buf *bp)
{
	/* reset b_iodone for when this is a single-buf i/o. */
	bp->b_iodone = uvm_aio_aiodone;

	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
	wakeup(&uvm.aiodoned);
	simple_unlock(&uvm.aiodoned_lock);
}
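/*
 * illustration (not part of this file): how a caller would wire the
 * two handlers above together when splitting one logical transfer
 * across several physical bufs (buf setup details elided):
 *
 *	mbp->b_resid = mbp->b_bcount = total_bytes;
 *	mbp->b_iodone = uvm_aio_biodone;	// top-level buf
 *
 *	bp->b_private = mbp;			// each nested buf
 *	bp->b_iodone = uvm_aio_biodone1;
 *
 * each nested buf subtracts its b_bcount from the master's b_resid;
 * whichever finishes last drives b_resid to 0 and biodone()s the
 * master, which then queues itself for uvm_aio_aiodone() in thread
 * context.
 */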
/*
 * uvm_aio_aiodone: do iodone processing for async i/os.
 * this should be called in thread context, not interrupt context.
 */

void
uvm_aio_aiodone(struct buf *bp)
{
	int npages = bp->b_bufsize >> PAGE_SHIFT;
	struct vm_page *pg, *pgs[npages];
	struct uvm_object *uobj;
	struct simplelock *slock;
	int s, i, error, swslot;
	boolean_t write, swap;
	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
	UVMHIST_LOG(ubchist, "bp %p", bp, 0,0,0);

	error = (bp->b_flags & B_ERROR) ? (bp->b_error ? bp->b_error : EIO) : 0;
	write = (bp->b_flags & B_READ) == 0;
	/* XXXUBC B_NOCACHE is for swap pager, should be done differently */
	if (write && !(bp->b_flags & B_NOCACHE) && bioops.io_pageiodone) {
		(*bioops.io_pageiodone)(bp);
	}

	uobj = NULL;
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		UVMHIST_LOG(ubchist, "pgs[%d] = %p", i, pgs[i],0,0);
	}
	uvm_pagermapout((vaddr_t)bp->b_data, npages);

	swslot = 0;
	slock = NULL;
	pg = pgs[0];
	swap = (pg->uanon != NULL && pg->uobject == NULL) ||
		(pg->pqflags & PQ_AOBJ) != 0;
	if (!swap) {
		uobj = pg->uobject;
		slock = &uobj->vmobjlock;
		simple_lock(slock);
		uvm_lock_pageq();
	} else {
#if defined(VMSWAP)
		if (error) {
			if (pg->uobject != NULL) {
				swslot = uao_find_swslot(pg->uobject,
				    pg->offset >> PAGE_SHIFT);
			} else {
				swslot = pg->uanon->an_swslot;
			}
			KASSERT(swslot);
		}
#else /* defined(VMSWAP) */
		panic("%s: swap", __func__);
#endif /* defined(VMSWAP) */
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		KASSERT(swap || pg->uobject == uobj);
		UVMHIST_LOG(ubchist, "pg %p", pg, 0,0,0);

#if defined(VMSWAP)
		/*
		 * for swap i/os, lock each page's object (or anon)
		 * individually since each page may need a different lock.
		 */

		if (swap) {
			if (pg->uobject != NULL) {
				slock = &pg->uobject->vmobjlock;
			} else {
				slock = &pg->uanon->an_lock;
			}
			simple_lock(slock);
			uvm_lock_pageq();
		}
#endif /* defined(VMSWAP) */

		/*
		 * process errors.  for reads, just mark the page to be freed.
		 * for writes, if the error was ENOMEM, we assume this was
		 * a transient failure so we mark the page dirty so that
		 * we'll try to write it again later.  for all other write
		 * errors, we assume the error is permanent, thus the data
		 * in the page is lost.  bummer.
		 */

		if (error) {
			int slot;
			if (!write) {
				pg->flags |= PG_RELEASED;
				continue;
			} else if (error == ENOMEM) {
				if (pg->flags & PG_PAGEOUT) {
					pg->flags &= ~PG_PAGEOUT;
					uvmexp.paging--;
				}
				pg->flags &= ~PG_CLEAN;
				uvm_pageactivate(pg);
				slot = 0;
			} else
				slot = SWSLOT_BAD;

#if defined(VMSWAP)
			if (swap) {
				if (pg->uobject != NULL) {
					int oldslot;
					oldslot = uao_set_swslot(pg->uobject,
					    pg->offset >> PAGE_SHIFT, slot);
					KASSERT(oldslot == swslot + i);
				} else {
					KASSERT(pg->uanon->an_swslot ==
					    swslot + i);
					pg->uanon->an_swslot = slot;
				}
			}
#endif /* defined(VMSWAP) */
		}

		/*
		 * if the page is PG_FAKE, this must have been a read to
		 * initialize the page.  clear PG_FAKE and activate the page.
		 * we must also clear the pmap "modified" flag since it may
		 * still be set from the page's previous identity.
		 */

		if (pg->flags & PG_FAKE) {
			KASSERT(!write);
			pg->flags &= ~PG_FAKE;
#if defined(READAHEAD_STATS)
			pg->flags |= PG_READAHEAD;
			uvm_ra_total.ev_count++;
#endif /* defined(READAHEAD_STATS) */
			uvm_pageactivate(pg);
			pmap_clear_modify(pg);
		}

		/*
		 * do accounting for pagedaemon i/o and arrange to free
		 * the pages instead of just unbusying them.
		 */

		if (pg->flags & PG_PAGEOUT) {
			pg->flags &= ~PG_PAGEOUT;
			uvmexp.paging--;
			uvmexp.pdfreed++;
			pg->flags |= PG_RELEASED;
		}

#if defined(VMSWAP)
		/*
		 * for swap pages, unlock everything for this page now.
		 */

		if (swap) {
			if (pg->uobject == NULL && pg->uanon->an_ref == 0 &&
			    (pg->flags & PG_RELEASED) != 0) {
				uvm_unlock_pageq();
				uvm_anon_release(pg->uanon);
			} else {
				uvm_page_unbusy(&pg, 1);
				uvm_unlock_pageq();
				simple_unlock(slock);
			}
		}
#endif /* defined(VMSWAP) */
	}
	if (!swap) {
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(slock);
	} else {
#if defined(VMSWAP)
		KASSERT(write);

		/* these pages are now only in swap. */
		simple_lock(&uvm.swap_data_lock);
		KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse);
		if (error != ENOMEM)
			uvmexp.swpgonly += npages;
		simple_unlock(&uvm.swap_data_lock);
		if (error) {
			if (error != ENOMEM)
				uvm_swap_markbad(swslot, npages);
			else
				uvm_swap_free(swslot, npages);
		}
		uvmexp.pdpending--;
#endif /* defined(VMSWAP) */
	}
	s = splbio();
	if (write && (bp->b_flags & B_AGE) != 0) {
		vwakeup(bp);
	}
	putiobuf(bp);
	splx(s);
}
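/*
 * illustration (not part of this file): the swap-slot bookkeeping in
 * the error path above.  a failed swap write of a cluster that began
 * at "swslot" has page i in slot swslot + i; that slot is rewritten as
 * either 0 (ENOMEM: transient, forget the slot and retry later) or
 * SWSLOT_BAD (hard error: remember the slot is unusable):
 *
 *	slot = (error == ENOMEM) ? 0 : SWSLOT_BAD;
 *	uao_set_swslot(pg->uobject, pg->offset >> PAGE_SHIFT, slot);
 *
 * the cluster itself is then released or poisoned in one call via
 * uvm_swap_free() or uvm_swap_markbad().
 */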
368 */ 369 370 if (error) { 371 int slot; 372 if (!write) { 373 pg->flags |= PG_RELEASED; 374 continue; 375 } else if (error == ENOMEM) { 376 if (pg->flags & PG_PAGEOUT) { 377 pg->flags &= ~PG_PAGEOUT; 378 uvmexp.paging--; 379 } 380 pg->flags &= ~PG_CLEAN; 381 uvm_pageactivate(pg); 382 slot = 0; 383 } else 384 slot = SWSLOT_BAD; 385 386 #if defined(VMSWAP) 387 if (swap) { 388 if (pg->uobject != NULL) { 389 int oldslot; 390 oldslot = uao_set_swslot(pg->uobject, 391 pg->offset >> PAGE_SHIFT, slot); 392 KASSERT(oldslot == swslot + i); 393 } else { 394 KASSERT(pg->uanon->an_swslot == 395 swslot + i); 396 pg->uanon->an_swslot = slot; 397 } 398 } 399 #endif /* defined(VMSWAP) */ 400 } 401 402 /* 403 * if the page is PG_FAKE, this must have been a read to 404 * initialize the page. clear PG_FAKE and activate the page. 405 * we must also clear the pmap "modified" flag since it may 406 * still be set from the page's previous identity. 407 */ 408 409 if (pg->flags & PG_FAKE) { 410 KASSERT(!write); 411 pg->flags &= ~PG_FAKE; 412 #if defined(READAHEAD_STATS) 413 pg->flags |= PG_SPECULATIVE; 414 uvm_ra_total.ev_count++; 415 #endif /* defined(READAHEAD_STATS) */ 416 uvm_pageactivate(pg); 417 pmap_clear_modify(pg); 418 } 419 420 /* 421 * do accounting for pagedaemon i/o and arrange to free 422 * the pages instead of just unbusying them. 423 */ 424 425 if (pg->flags & PG_PAGEOUT) { 426 pg->flags &= ~PG_PAGEOUT; 427 uvmexp.paging--; 428 uvmexp.pdfreed++; 429 pg->flags |= PG_RELEASED; 430 } 431 432 #if defined(VMSWAP) 433 /* 434 * for swap pages, unlock everything for this page now. 435 */ 436 437 if (swap) { 438 if (pg->uobject == NULL && pg->uanon->an_ref == 0 && 439 (pg->flags & PG_RELEASED) != 0) { 440 uvm_unlock_pageq(); 441 uvm_anon_release(pg->uanon); 442 } else { 443 uvm_page_unbusy(&pg, 1); 444 uvm_unlock_pageq(); 445 simple_unlock(slock); 446 } 447 } 448 #endif /* defined(VMSWAP) */ 449 } 450 if (!swap) { 451 uvm_page_unbusy(pgs, npages); 452 uvm_unlock_pageq(); 453 simple_unlock(slock); 454 } else { 455 #if defined(VMSWAP) 456 KASSERT(write); 457 458 /* these pages are now only in swap. */ 459 simple_lock(&uvm.swap_data_lock); 460 KASSERT(uvmexp.swpgonly + npages <= uvmexp.swpginuse); 461 if (error != ENOMEM) 462 uvmexp.swpgonly += npages; 463 simple_unlock(&uvm.swap_data_lock); 464 if (error) { 465 if (error != ENOMEM) 466 uvm_swap_markbad(swslot, npages); 467 else 468 uvm_swap_free(swslot, npages); 469 } 470 uvmexp.pdpending--; 471 #endif /* defined(VMSWAP) */ 472 } 473 s = splbio(); 474 if (write && (bp->b_flags & B_AGE) != 0) { 475 vwakeup(bp); 476 } 477 putiobuf(bp); 478 splx(s); 479 } 480 481 /* 482 * uvm_pageratop: convert KVAs in the pager map back to their page 483 * structures. 484 */ 485 486 struct vm_page * 487 uvm_pageratop(vaddr_t kva) 488 { 489 struct vm_page *pg; 490 paddr_t pa; 491 boolean_t rv; 492 493 rv = pmap_extract(pmap_kernel(), kva, &pa); 494 KASSERT(rv); 495 pg = PHYS_TO_VM_PAGE(pa); 496 KASSERT(pg != NULL); 497 return (pg); 498 } 499